hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a3a64b23cac5bab2f266d868c0775b45a843256 | 2,400 | py | Python | twitter_stats.py | bit2pixel/watch-twitter-unfollowers | 4f13e3c6f95bd350e06fdb37ac7dd8f95f7a80fa | [
"MIT"
] | 3 | 2016-02-23T22:54:39.000Z | 2016-04-18T05:16:49.000Z | twitter_stats.py | bit2pixel/watch-twitter-unfollowers | 4f13e3c6f95bd350e06fdb37ac7dd8f95f7a80fa | [
"MIT"
] | null | null | null | twitter_stats.py | bit2pixel/watch-twitter-unfollowers | 4f13e3c6f95bd350e06fdb37ac7dd8f95f7a80fa | [
"MIT"
] | 3 | 2016-02-21T17:50:59.000Z | 2018-09-27T03:08:30.000Z | import time
import shelve
import datetime
import settings
from twython import Twython
from contextlib import contextmanager
@contextmanager
def closing(resource):
    """Yield *resource* and guarantee its ``close()`` runs on exit.

    Local re-implementation of :func:`contextlib.closing`.
    """
    try:
        yield resource
    finally:
        resource.close()
class TwitterStats:
    """Track Twitter followers over time and record who unfollowed.

    State persists between runs in a ``shelve`` database named
    ``twitter_stats`` in the working directory.
    """

    # Single source of truth for the human-readable timestamps stored in
    # the shelf (previously duplicated in two places).
    TIMESTAMP_FORMAT = '%b %d %Y %H:%M:%S'

    def __init__(self):
        # Authenticated Twitter API client; credentials come from settings.
        self.twitter = Twython(
            app_key=settings.consumer_key,
            app_secret=settings.consumer_secret,
            oauth_token=settings.oauth_token,
            oauth_token_secret=settings.oauth_token_secret
        )

    def init_storage(self):
        """Open the persistent stats shelf, seeding the schema on first run."""
        storage = shelve.open('twitter_stats', writeback=True)
        if not storage:  # empty shelf -> first run
            storage['followers'] = set()
            storage['unfollowers'] = []
            storage['unfollowers_since_last_check'] = None
            storage['last_update'] = None
        return storage

    def get_followers(self):
        """Return the current follower ids as a set.

        NOTE(review): only a single getFollowersIDs() call is made; accounts
        with more followers than one API page would need cursor pagination —
        confirm against the twython docs.
        """
        follower_ids = self.twitter.getFollowersIDs()['ids']
        return set(follower_ids)

    def show_screen_name(self, user_id):
        """Resolve *user_id* to its screen name via the API."""
        user = self.twitter.showUser(user_id=user_id)
        return user['screen_name']

    def update_unfollower_stats(self):
        """Diff stored followers against current ones and record unfollowers."""
        with closing(self.init_storage()) as storage:
            previous_followers = storage['followers']
            current_followers = self.get_followers()
            # Anyone previously known who is absent now has unfollowed.
            new_unfollower_ids = previous_followers - current_followers
            unfollowers_since_last_check = []
            for follower_id in new_unfollower_ids:
                unfollower = {
                    'id': follower_id,
                    'screen_name': self.show_screen_name(follower_id),
                    'timestamp': datetime.datetime.now().strftime(
                        self.TIMESTAMP_FORMAT
                    ),
                }
                storage['unfollowers'].append(unfollower)
                unfollowers_since_last_check.append(unfollower)
            storage['followers'] = current_followers
            storage['unfollowers_since_last_check'] = unfollowers_since_last_check
            storage['last_update'] = datetime.datetime.now().strftime(
                self.TIMESTAMP_FORMAT
            )
def main():
    """Poll Twitter forever, refreshing unfollower stats at a fixed interval."""
    stats = TwitterStats()
    while True:
        stats.update_unfollower_stats()
        time.sleep(settings.update_interval)
# Run the unfollower watcher only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 29.62963 | 90 | 0.606667 |
6b6b7973534c05588ff3575e5d9f9a643520d0e6 | 292 | py | Python | tests/__init__.py | karpierz/pyc_wheel | b2502dc0d77dbeadd554f9d385103e4fde1be6fe | [
"MIT"
] | 9 | 2020-07-30T00:56:32.000Z | 2022-03-03T19:36:27.000Z | tests/__init__.py | karpierz/pyc_wheel | b2502dc0d77dbeadd554f9d385103e4fde1be6fe | [
"MIT"
] | 5 | 2020-02-28T15:47:26.000Z | 2021-07-12T11:47:54.000Z | tests/__init__.py | karpierz/pyc_wheel | b2502dc0d77dbeadd554f9d385103e4fde1be6fe | [
"MIT"
] | 6 | 2020-04-16T16:14:14.000Z | 2021-04-06T05:44:49.000Z | # Copyright (c) 2019-2021 Adam Karpierz
# Licensed under the MIT License
# https://opensource.org/licenses/MIT
# Names this test package exposes to importers.
__all__ = ('top_dir', 'test_dir')
import sys, pathlib
# Keep test runs from writing .pyc files into the source tree.
sys.dont_write_bytecode = True
# Directory containing this file, and the project root one level above.
test_dir = pathlib.Path(__file__).resolve().parent
top_dir = test_dir.parent
# Remove module names from the package namespace (only the paths are public).
del sys, pathlib
| 24.333333 | 50 | 0.763699 |
7326b4af9691f5a596a14bfbbc863db57be19b50 | 813 | py | Python | getcardlist.py | shinji19/sealed-deck-generator | 8321d023fbef3a4b58c37fe36ac9b225b22bb4d1 | [
"MIT"
] | null | null | null | getcardlist.py | shinji19/sealed-deck-generator | 8321d023fbef3a4b58c37fe36ac9b225b22bb4d1 | [
"MIT"
] | null | null | null | getcardlist.py | shinji19/sealed-deck-generator | 8321d023fbef3a4b58c37fe36ac9b225b22bb4d1 | [
"MIT"
] | null | null | null | from mtgsdk import Card
import pickle
DATABASE_FILE_NAME = 'database.dat'
def fetch(set_code='M21', max_number=275):
    """Fetch the cards of one MTG set, sorted by collector number.

    Args:
        set_code: set to request from the API (default ``'M21'``, as before).
        max_number: cards whose collector number is >= this value are
            excluded (default 275, the original hard-coded cutoff).

    Returns:
        List of Card objects sorted by ``int(card.number)``.
    """
    print('Start request...')
    card_all = Card.where(set=set_code).all()
    print('Success request!')
    # Exclude the out-of-set ("extra"/promo) cards appended after the
    # regular numbering.
    cards = [c for c in card_all if int(c.number) < max_number]
    cards.sort(key=lambda x: int(x.number))
    return cards
def save(card_list):
    """Serialize *card_list* to DATABASE_FILE_NAME with pickle."""
    with open(DATABASE_FILE_NAME, 'wb') as dump_file:
        pickle.dump(card_list, dump_file)
def load():
    """Deserialize and return the card list stored in DATABASE_FILE_NAME."""
    with open(DATABASE_FILE_NAME, 'rb') as dump_file:
        return pickle.load(dump_file)
def main():
    """Fetch the card list, persist it, then print the reloaded copy."""
    save(fetch())
    for card in load():
        print('{0} ({1}) {2}'.format(card.name, card.set, card.number))
# Script entry point: fetch, save and echo the card list.
if __name__ == '__main__':
    main()
| 19.357143 | 71 | 0.595326 |
7e6b6b11353fef27e6bfd2b5690c7d183e68b6c8 | 40,794 | py | Python | tests/api/test_discount.py | fooliscool/saleor | 9502467c0e745eb8afdbfa373d634814d133e864 | [
"CC-BY-4.0"
] | 1 | 2017-01-07T04:04:39.000Z | 2017-01-07T04:04:39.000Z | tests/api/test_discount.py | fooliscool/saleor | 9502467c0e745eb8afdbfa373d634814d133e864 | [
"CC-BY-4.0"
] | 14 | 2021-02-02T22:31:33.000Z | 2022-03-12T00:16:06.000Z | tests/api/test_discount.py | fooliscool/saleor | 9502467c0e745eb8afdbfa373d634814d133e864 | [
"CC-BY-4.0"
] | null | null | null | from datetime import timedelta
from decimal import Decimal
import graphene
import pytest
from django.utils import timezone
from django_countries import countries
from saleor.discount import DiscountValueType, VoucherType
from saleor.discount.models import Sale, Voucher
from saleor.graphql.discount.enums import DiscountValueTypeEnum, VoucherTypeEnum
from tests.api.utils import get_graphql_content
@pytest.fixture
def voucher_countries(voucher):
    # Voucher restricted to every country known to django_countries.
    voucher.countries = countries
    voucher.save(update_fields=["countries"])
    return voucher


@pytest.fixture
def query_vouchers_with_filter():
    # Reusable voucher-list query; tests only vary the $filter variable.
    query = """
    query ($filter: VoucherFilterInput!, ) {
      vouchers(first:5, filter: $filter){
        edges{
          node{
            id
            name
            startDate
          }
        }
      }
    }
    """
    return query


@pytest.fixture
def query_sales_with_filter():
    # Reusable sale-list query; tests only vary the $filter variable.
    query = """
    query ($filter: SaleFilterInput!, ) {
      sales(first:5, filter: $filter){
        edges{
          node{
            id
            name
            startDate
          }
        }
      }
    }
    """
    return query


# NOTE(review): module-local sale/voucher fixtures; these may shadow
# same-named fixtures in a conftest — confirm intended.
@pytest.fixture
def sale():
    return Sale.objects.create(name="Sale", value=123)


@pytest.fixture
def voucher():
    return Voucher.objects.create(name="Voucher", discount_value=123)
def test_voucher_query(
    staff_api_client, voucher_countries, permission_manage_discounts
):
    """Voucher list query returns every scalar field plus the country list."""
    query = """
    query vouchers {
        vouchers(first: 1) {
            edges {
                node {
                    type
                    name
                    code
                    usageLimit
                    used
                    startDate
                    discountValueType
                    discountValue
                    applyOncePerCustomer
                    countries {
                        code
                        country
                    }
                }
            }
        }
    }
    """
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"][0]["node"]
    # Each GraphQL field must mirror the model instance created by the fixture.
    assert data["type"] == voucher_countries.type.upper()
    assert data["name"] == voucher_countries.name
    assert data["code"] == voucher_countries.code
    assert data["usageLimit"] == voucher_countries.usage_limit
    assert data["applyOncePerCustomer"] == voucher_countries.apply_once_per_customer
    assert data["used"] == voucher_countries.used
    assert data["startDate"] == voucher_countries.start_date.isoformat()
    assert data["discountValueType"] == voucher_countries.discount_value_type.upper()
    assert data["discountValue"] == voucher_countries.discount_value
    assert data["countries"] == [
        {"country": country.name, "code": country.code}
        for country in voucher_countries.countries
    ]


def test_sale_query(staff_api_client, sale, permission_manage_discounts):
    """Sale list query returns the sale's type, name, value and start date."""
    query = """
    query sales {
        sales(first: 1) {
            edges {
                node {
                    type
                    name
                    value
                    startDate
                }
            }
        }
    }
    """
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["sales"]["edges"][0]["node"]
    assert data["type"] == sale.type.upper()
    assert data["name"] == sale.name
    assert data["value"] == sale.value
    assert data["startDate"] == sale.start_date.isoformat()
CREATE_VOUCHER_MUTATION = """
mutation voucherCreate(
$type: VoucherTypeEnum, $name: String, $code: String,
$discountValueType: DiscountValueTypeEnum, $usageLimit: Int,
$discountValue: Decimal, $minAmountSpent: Decimal, $minCheckoutItemsQuantity: Int,
$startDate: DateTime, $endDate: DateTime, $applyOncePerOrder: Boolean,
$applyOncePerCustomer: Boolean) {
voucherCreate(input: {
name: $name, type: $type, code: $code,
discountValueType: $discountValueType,
discountValue: $discountValue, minAmountSpent: $minAmountSpent,
minCheckoutItemsQuantity: $minCheckoutItemsQuantity,
startDate: $startDate, endDate: $endDate, usageLimit: $usageLimit
applyOncePerOrder: $applyOncePerOrder,
applyOncePerCustomer: $applyOncePerCustomer}) {
errors {
field
message
}
voucher {
type
minSpent {
amount
}
minAmountSpent {
amount
}
minCheckoutItemsQuantity
name
code
discountValueType
startDate
endDate
applyOncePerOrder
applyOncePerCustomer
}
}
}
"""
def test_create_voucher(staff_api_client, permission_manage_discounts):
    """Happy path: every input field is persisted on the created voucher."""
    start_date = timezone.now() - timedelta(days=365)
    end_date = timezone.now() + timedelta(days=365)
    variables = {
        "name": "test voucher",
        "type": VoucherTypeEnum.ENTIRE_ORDER.name,
        "code": "testcode123",
        "discountValueType": DiscountValueTypeEnum.FIXED.name,
        "discountValue": 10.12,
        "minAmountSpent": 1.12,
        "minCheckoutItemsQuantity": 10,
        "startDate": start_date.isoformat(),
        "endDate": end_date.isoformat(),
        "applyOncePerOrder": True,
        "applyOncePerCustomer": True,
        "usageLimit": 3,
    }
    response = staff_api_client.post_graphql(
        CREATE_VOUCHER_MUTATION, variables, permissions=[permission_manage_discounts]
    )
    get_graphql_content(response)
    # Assert against the database row rather than the GraphQL payload.
    voucher = Voucher.objects.get()
    assert voucher.type == VoucherType.ENTIRE_ORDER
    assert voucher.min_spent_amount == Decimal("1.12")
    assert voucher.name == "test voucher"
    assert voucher.code == "testcode123"
    assert voucher.discount_value_type == DiscountValueType.FIXED
    assert voucher.start_date == start_date
    assert voucher.end_date == end_date
    assert voucher.apply_once_per_order
    assert voucher.apply_once_per_customer
    assert voucher.usage_limit == 3
def test_create_voucher_with_empty_code(staff_api_client, permission_manage_discounts):
    """An empty ``code`` makes the backend auto-generate a non-empty one."""
    start_date = timezone.now() - timedelta(days=365)
    end_date = timezone.now() + timedelta(days=365)
    variables = {
        "name": "test voucher",
        "type": VoucherTypeEnum.ENTIRE_ORDER.name,
        "code": "",
        "discountValueType": DiscountValueTypeEnum.FIXED.name,
        "discountValue": 10.12,
        # Fixed: was "minSpent", which CREATE_VOUCHER_MUTATION does not
        # declare (it was silently ignored); the declared variable is
        # $minAmountSpent, matching the sibling create-voucher tests.
        "minAmountSpent": 1.12,
        "startDate": start_date.isoformat(),
        "endDate": end_date.isoformat(),
        "usageLimit": None,
    }
    response = staff_api_client.post_graphql(
        CREATE_VOUCHER_MUTATION, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherCreate"]["voucher"]
    assert data["name"] == variables["name"]
    assert data["code"] != ""
def test_create_voucher_with_existing_gift_card_code(
    staff_api_client, gift_card, permission_manage_discounts
):
    """A voucher code may not collide with an existing gift card code."""
    start_date = timezone.now() - timedelta(days=365)
    end_date = timezone.now() + timedelta(days=365)
    variables = {
        "name": "test voucher",
        "type": VoucherTypeEnum.ENTIRE_ORDER.name,
        "code": gift_card.code,
        "discountValueType": DiscountValueTypeEnum.FIXED.name,
        "discountValue": 10.12,
        "minAmountSpent": 1.12,
        "startDate": start_date.isoformat(),
        "endDate": end_date.isoformat(),
        "usageLimit": 3,
    }
    response = staff_api_client.post_graphql(
        CREATE_VOUCHER_MUTATION, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    assert content["data"]["voucherCreate"]["errors"]
    errors = content["data"]["voucherCreate"]["errors"]
    assert len(errors) == 1
    # The collision is reported on the shared promo-code field.
    assert errors[0]["field"] == "promoCode"


def test_create_voucher_with_existing_voucher_code(
    staff_api_client, voucher_shipping_type, permission_manage_discounts
):
    """A voucher code may not collide with another voucher's code."""
    start_date = timezone.now() - timedelta(days=365)
    end_date = timezone.now() + timedelta(days=365)
    variables = {
        "name": "test voucher",
        "type": VoucherTypeEnum.ENTIRE_ORDER.name,
        "code": voucher_shipping_type.code,
        "discountValueType": DiscountValueTypeEnum.FIXED.name,
        "discountValue": 10.12,
        "minAmountSpent": 1.12,
        "startDate": start_date.isoformat(),
        "endDate": end_date.isoformat(),
        "usageLimit": 3,
    }
    response = staff_api_client.post_graphql(
        CREATE_VOUCHER_MUTATION, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    assert content["data"]["voucherCreate"]["errors"]
    errors = content["data"]["voucherCreate"]["errors"]
    assert len(errors) == 1
    assert errors[0]["field"] == "promoCode"
def test_update_voucher(staff_api_client, voucher, permission_manage_discounts):
    """voucherUpdate changes code, value type and per-order flags."""
    query = """
    mutation voucherUpdate($code: String,
        $discountValueType: DiscountValueTypeEnum, $id: ID!,
        $applyOncePerOrder: Boolean, $minCheckoutItemsQuantity: Int) {
            voucherUpdate(id: $id, input: {
                code: $code, discountValueType: $discountValueType,
                applyOncePerOrder: $applyOncePerOrder,
                minCheckoutItemsQuantity: $minCheckoutItemsQuantity
                }) {
                errors {
                    field
                    message
                }
                voucher {
                    code
                    discountValueType
                    applyOncePerOrder
                    minCheckoutItemsQuantity
                }
            }
        }
    """
    # Flip the flag so the mutation demonstrably changes it.
    apply_once_per_order = not voucher.apply_once_per_order
    # Set discount value type to 'fixed' and change it in mutation
    voucher.discount_value_type = DiscountValueType.FIXED
    voucher.save()
    assert voucher.code != "testcode123"
    variables = {
        "id": graphene.Node.to_global_id("Voucher", voucher.id),
        "code": "testcode123",
        "discountValueType": DiscountValueTypeEnum.PERCENTAGE.name,
        "applyOncePerOrder": apply_once_per_order,
        "minCheckoutItemsQuantity": 10,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherUpdate"]["voucher"]
    assert data["code"] == "testcode123"
    assert data["discountValueType"] == DiscountValueType.PERCENTAGE.upper()
    assert data["applyOncePerOrder"] == apply_once_per_order
    assert data["minCheckoutItemsQuantity"] == 10
def test_voucher_delete_mutation(
    staff_api_client, voucher, permission_manage_discounts
):
    """voucherDelete removes the database row and echoes the deleted voucher."""
    query = """
        mutation DeleteVoucher($id: ID!) {
            voucherDelete(id: $id) {
                voucher {
                    name
                    id
                }
                errors {
                    field
                    message
                }
              }
            }
    """
    variables = {"id": graphene.Node.to_global_id("Voucher", voucher.id)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherDelete"]
    assert data["voucher"]["name"] == voucher.name
    # The row must be gone from the database.
    with pytest.raises(voucher._meta.model.DoesNotExist):
        voucher.refresh_from_db()
def test_voucher_add_catalogues(
    staff_api_client,
    voucher,
    category,
    product,
    collection,
    permission_manage_discounts,
):
    """voucherCataloguesAdd attaches products, collections and categories."""
    query = """
        mutation voucherCataloguesAdd($id: ID!, $input: CatalogueInput!) {
            voucherCataloguesAdd(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    product_id = graphene.Node.to_global_id("Product", product.id)
    collection_id = graphene.Node.to_global_id("Collection", collection.id)
    category_id = graphene.Node.to_global_id("Category", category.id)
    variables = {
        "id": graphene.Node.to_global_id("Voucher", voucher.id),
        "input": {
            "products": [product_id],
            "collections": [collection_id],
            "categories": [category_id],
        },
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherCataloguesAdd"]
    assert not data["errors"]
    assert product in voucher.products.all()
    assert category in voucher.categories.all()
    assert collection in voucher.collections.all()


def test_voucher_remove_catalogues(
    staff_api_client,
    voucher,
    category,
    product,
    collection,
    permission_manage_discounts,
):
    """voucherCataloguesRemove detaches previously attached catalogue items."""
    voucher.products.add(product)
    voucher.collections.add(collection)
    voucher.categories.add(category)
    query = """
        mutation voucherCataloguesRemove($id: ID!, $input: CatalogueInput!) {
            voucherCataloguesRemove(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    product_id = graphene.Node.to_global_id("Product", product.id)
    collection_id = graphene.Node.to_global_id("Collection", collection.id)
    category_id = graphene.Node.to_global_id("Category", category.id)
    variables = {
        "id": graphene.Node.to_global_id("Voucher", voucher.id),
        "input": {
            "products": [product_id],
            "collections": [collection_id],
            "categories": [category_id],
        },
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherCataloguesRemove"]
    assert not data["errors"]
    assert product not in voucher.products.all()
    assert category not in voucher.categories.all()
    assert collection not in voucher.collections.all()
def test_voucher_add_no_catalogues(
    staff_api_client, voucher, permission_manage_discounts
):
    """Adding empty catalogue lists succeeds and attaches nothing."""
    query = """
        mutation voucherCataloguesAdd($id: ID!, $input: CatalogueInput!) {
            voucherCataloguesAdd(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    variables = {
        "id": graphene.Node.to_global_id("Voucher", voucher.id),
        "input": {"products": [], "collections": [], "categories": []},
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherCataloguesAdd"]
    assert not data["errors"]
    assert not voucher.products.exists()
    assert not voucher.categories.exists()
    assert not voucher.collections.exists()
def test_voucher_remove_no_catalogues(
    staff_api_client,
    voucher,
    category,
    product,
    collection,
    permission_manage_discounts,
):
    """Removing empty catalogue lists must leave existing relations intact.

    Fixed: the original body copy-pasted the ``voucherCataloguesAdd``
    mutation; this test is about ``voucherCataloguesRemove``.
    """
    voucher.products.add(product)
    voucher.collections.add(collection)
    voucher.categories.add(category)
    query = """
        mutation voucherCataloguesRemove($id: ID!, $input: CatalogueInput!) {
            voucherCataloguesRemove(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    variables = {
        "id": graphene.Node.to_global_id("Voucher", voucher.id),
        "input": {"products": [], "collections": [], "categories": []},
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["voucherCataloguesRemove"]
    assert not data["errors"]
    # Nothing was listed for removal, so everything must still be attached.
    assert voucher.products.exists()
    assert voucher.categories.exists()
    assert voucher.collections.exists()
def test_create_sale(staff_api_client, permission_manage_discounts):
    """saleCreate persists all supplied fields and echoes them back."""
    query = """
    mutation  saleCreate(
            $type: DiscountValueTypeEnum, $name: String, $value: Decimal,
            $startDate: DateTime, $endDate: DateTime) {
        saleCreate(input: {
                name: $name, type: $type, value: $value,
                startDate: $startDate, endDate: $endDate}) {
            sale {
                type
                name
                value
                startDate
                endDate
            }
            errors {
                field
                message
            }
        }
    }
    """
    start_date = timezone.now() - timedelta(days=365)
    end_date = timezone.now() + timedelta(days=365)
    variables = {
        "name": "test sale",
        "type": DiscountValueTypeEnum.FIXED.name,
        "value": "10.12",
        "startDate": start_date.isoformat(),
        "endDate": end_date.isoformat(),
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleCreate"]["sale"]
    assert data["type"] == DiscountValueType.FIXED.upper()
    assert data["name"] == "test sale"
    assert data["value"] == 10.12
    assert data["startDate"] == start_date.isoformat()
    assert data["endDate"] == end_date.isoformat()


def test_update_sale(staff_api_client, sale, permission_manage_discounts):
    """saleUpdate can switch the discount value type."""
    query = """
    mutation  saleUpdate($type: DiscountValueTypeEnum, $id: ID!) {
            saleUpdate(id: $id, input: {type: $type}) {
                errors {
                    field
                    message
                }
                sale {
                    type
                }
            }
        }
    """
    # Set discount value type to 'fixed' and change it in mutation
    sale.type = DiscountValueType.FIXED
    sale.save()
    variables = {
        "id": graphene.Node.to_global_id("Sale", sale.id),
        "type": DiscountValueTypeEnum.PERCENTAGE.name,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleUpdate"]["sale"]
    assert data["type"] == DiscountValueType.PERCENTAGE.upper()


def test_sale_delete_mutation(staff_api_client, sale, permission_manage_discounts):
    """saleDelete removes the database row and echoes the deleted sale."""
    query = """
        mutation DeleteSale($id: ID!) {
            saleDelete(id: $id) {
                sale {
                    name
                    id
                }
                errors {
                    field
                    message
                }
              }
            }
    """
    variables = {"id": graphene.Node.to_global_id("Sale", sale.id)}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleDelete"]
    assert data["sale"]["name"] == sale.name
    with pytest.raises(sale._meta.model.DoesNotExist):
        sale.refresh_from_db()
def test_sale_add_catalogues(
    staff_api_client, sale, category, product, collection, permission_manage_discounts
):
    """saleCataloguesAdd attaches products, collections and categories."""
    query = """
        mutation saleCataloguesAdd($id: ID!, $input: CatalogueInput!) {
            saleCataloguesAdd(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    product_id = graphene.Node.to_global_id("Product", product.id)
    collection_id = graphene.Node.to_global_id("Collection", collection.id)
    category_id = graphene.Node.to_global_id("Category", category.id)
    variables = {
        "id": graphene.Node.to_global_id("Sale", sale.id),
        "input": {
            "products": [product_id],
            "collections": [collection_id],
            "categories": [category_id],
        },
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleCataloguesAdd"]
    assert not data["errors"]
    assert product in sale.products.all()
    assert category in sale.categories.all()
    assert collection in sale.collections.all()


def test_sale_remove_catalogues(
    staff_api_client, sale, category, product, collection, permission_manage_discounts
):
    """saleCataloguesRemove detaches previously attached catalogue items."""
    sale.products.add(product)
    sale.collections.add(collection)
    sale.categories.add(category)
    query = """
        mutation saleCataloguesRemove($id: ID!, $input: CatalogueInput!) {
            saleCataloguesRemove(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    product_id = graphene.Node.to_global_id("Product", product.id)
    collection_id = graphene.Node.to_global_id("Collection", collection.id)
    category_id = graphene.Node.to_global_id("Category", category.id)
    variables = {
        "id": graphene.Node.to_global_id("Sale", sale.id),
        "input": {
            "products": [product_id],
            "collections": [collection_id],
            "categories": [category_id],
        },
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleCataloguesRemove"]
    assert not data["errors"]
    assert product not in sale.products.all()
    assert category not in sale.categories.all()
    assert collection not in sale.collections.all()


def test_sale_add_no_catalogues(staff_api_client, sale, permission_manage_discounts):
    """Adding empty catalogue lists succeeds and attaches nothing."""
    query = """
        mutation saleCataloguesAdd($id: ID!, $input: CatalogueInput!) {
            saleCataloguesAdd(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    variables = {
        "id": graphene.Node.to_global_id("Sale", sale.id),
        "input": {"products": [], "collections": [], "categories": []},
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleCataloguesAdd"]
    assert not data["errors"]
    assert not sale.products.exists()
    assert not sale.categories.exists()
    assert not sale.collections.exists()
def test_sale_remove_no_catalogues(
    staff_api_client, sale, category, product, collection, permission_manage_discounts
):
    """Removing empty catalogue lists must leave existing relations intact.

    Fixed: the original body copy-pasted the ``saleCataloguesAdd``
    mutation; this test is about ``saleCataloguesRemove``.
    """
    sale.products.add(product)
    sale.collections.add(collection)
    sale.categories.add(category)
    query = """
        mutation saleCataloguesRemove($id: ID!, $input: CatalogueInput!) {
            saleCataloguesRemove(id: $id, input: $input) {
                errors {
                    field
                    message
                }
            }
        }
    """
    variables = {
        "id": graphene.Node.to_global_id("Sale", sale.id),
        "input": {"products": [], "collections": [], "categories": []},
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["saleCataloguesRemove"]
    assert not data["errors"]
    # Nothing was listed for removal, so everything must still be attached.
    assert sale.products.exists()
    assert sale.categories.exists()
    assert sale.collections.exists()
@pytest.mark.parametrize(
    "voucher_filter, start_date, end_date, count",
    [
        (
            {"status": "ACTIVE"},
            timezone.now().replace(year=2015, month=1, day=1),
            timezone.now() + timedelta(days=365),
            2,
        ),
        (
            {"status": "EXPIRED"},
            timezone.now().replace(year=2015, month=1, day=1),
            timezone.now().replace(year=2018, month=1, day=1),
            1,
        ),
        (
            {"status": "SCHEDULED"},
            timezone.now() + timedelta(days=3),
            timezone.now() + timedelta(days=10),
            1,
        ),
    ],
)
def test_query_vouchers_with_filter_status(
    voucher_filter,
    start_date,
    end_date,
    count,
    staff_api_client,
    query_vouchers_with_filter,
    permission_manage_discounts,
):
    """Status filter classifies vouchers by their start/end date window."""
    Voucher.objects.bulk_create(
        [
            Voucher(
                name="Voucher1",
                discount_value=123,
                code="abc",
                start_date=timezone.now(),
            ),
            Voucher(
                name="Voucher2",
                discount_value=123,
                code="123",
                start_date=start_date,
                end_date=end_date,
            ),
        ]
    )
    variables = {"filter": voucher_filter}
    response = staff_api_client.post_graphql(
        query_vouchers_with_filter, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"]
    assert len(data) == count


@pytest.mark.parametrize(
    "voucher_filter, count",
    [
        ({"timesUsed": {"gte": 1, "lte": 5}}, 1),
        ({"timesUsed": {"lte": 3}}, 2),
        ({"timesUsed": {"gte": 2}}, 1),
    ],
)
def test_query_vouchers_with_filter_times_used(
    voucher_filter,
    count,
    staff_api_client,
    query_vouchers_with_filter,
    permission_manage_discounts,
):
    """timesUsed range filter matches the vouchers' ``used`` counters."""
    Voucher.objects.bulk_create(
        [
            Voucher(name="Voucher1", discount_value=123, code="abc"),
            Voucher(name="Voucher2", discount_value=123, code="123", used=2),
        ]
    )
    variables = {"filter": voucher_filter}
    response = staff_api_client.post_graphql(
        query_vouchers_with_filter, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"]
    assert len(data) == count
@pytest.mark.parametrize(
    "voucher_filter, count",
    [
        ({"started": {"gte": "2019-04-18T00:00:00+00:00"}}, 1),
        ({"started": {"lte": "2012-01-14T00:00:00+00:00"}}, 1),
        (
            {
                "started": {
                    "lte": "2012-01-15T00:00:00+00:00",
                    "gte": "2012-01-01T00:00:00+00:00",
                }
            },
            1,
        ),
        ({"started": {"gte": "2012-01-03T00:00:00+00:00"}}, 2),
    ],
)
def test_query_vouchers_with_filter_started(
    voucher_filter,
    count,
    staff_api_client,
    query_vouchers_with_filter,
    permission_manage_discounts,
):
    """``started`` range filter matches the vouchers' start dates."""
    Voucher.objects.bulk_create(
        [
            Voucher(name="Voucher1", discount_value=123, code="abc"),
            Voucher(
                name="Voucher2",
                discount_value=123,
                code="123",
                start_date=timezone.now().replace(year=2012, month=1, day=5),
            ),
        ]
    )
    variables = {"filter": voucher_filter}
    response = staff_api_client.post_graphql(
        query_vouchers_with_filter, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"]
    assert len(data) == count


@pytest.mark.parametrize(
    "voucher_filter, count, discount_value_type",
    [
        ({"discountType": "PERCENTAGE"}, 1, DiscountValueType.PERCENTAGE),
        ({"discountType": "FIXED"}, 2, DiscountValueType.FIXED),
    ],
)
def test_query_vouchers_with_filter_discount_type(
    voucher_filter,
    count,
    discount_value_type,
    staff_api_client,
    query_vouchers_with_filter,
    permission_manage_discounts,
):
    """discountType filter matches the vouchers' discount value types."""
    Voucher.objects.bulk_create(
        [
            Voucher(
                name="Voucher1",
                discount_value=123,
                code="abc",
                discount_value_type=DiscountValueType.FIXED,
            ),
            Voucher(
                name="Voucher2",
                discount_value=123,
                code="123",
                discount_value_type=discount_value_type,
            ),
        ]
    )
    variables = {"filter": voucher_filter}
    response = staff_api_client.post_graphql(
        query_vouchers_with_filter, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"]
    assert len(data) == count


@pytest.mark.parametrize(
    "voucher_filter, count", [({"search": "Big"}, 1), ({"search": "GIFT"}, 2)]
)
def test_query_vouchers_with_filter_search(
    voucher_filter,
    count,
    staff_api_client,
    query_vouchers_with_filter,
    permission_manage_discounts,
):
    """search filter matches against voucher names and codes."""
    Voucher.objects.bulk_create(
        [
            Voucher(name="The Biggest Voucher", discount_value=123, code="GIFT"),
            Voucher(name="Voucher2", discount_value=123, code="GIFT-COUPON"),
        ]
    )
    variables = {"filter": voucher_filter}
    response = staff_api_client.post_graphql(
        query_vouchers_with_filter, variables, permissions=[permission_manage_discounts]
    )
    content = get_graphql_content(response)
    data = content["data"]["vouchers"]["edges"]
    assert len(data) == count
QUERY_VOUCHER_WITH_SORT = """
query ($sort_by: VoucherSortingInput!) {
vouchers(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
    "voucher_sort, result_order",
    [
        (
            {"field": "CODE", "direction": "ASC"},
            ["Voucher2", "Voucher1", "FreeShipping"],
        ),
        (
            {"field": "CODE", "direction": "DESC"},
            ["FreeShipping", "Voucher1", "Voucher2"],
        ),
        (
            {"field": "VALUE", "direction": "ASC"},
            ["Voucher2", "FreeShipping", "Voucher1"],
        ),
        (
            {"field": "VALUE", "direction": "DESC"},
            ["Voucher1", "FreeShipping", "Voucher2"],
        ),
        (
            {"field": "TYPE", "direction": "ASC"},
            ["Voucher1", "Voucher2", "FreeShipping"],
        ),
        (
            {"field": "TYPE", "direction": "DESC"},
            ["FreeShipping", "Voucher1", "Voucher2"],
        ),
        (
            {"field": "START_DATE", "direction": "ASC"},
            ["FreeShipping", "Voucher2", "Voucher1"],
        ),
        (
            {"field": "START_DATE", "direction": "DESC"},
            ["Voucher1", "Voucher2", "FreeShipping"],
        ),
        (
            {"field": "END_DATE", "direction": "ASC"},
            ["Voucher2", "FreeShipping", "Voucher1"],
        ),
        (
            {"field": "END_DATE", "direction": "DESC"},
            ["Voucher1", "FreeShipping", "Voucher2"],
        ),
        (
            {"field": "USAGE_LIMIT", "direction": "ASC"},
            ["Voucher1", "FreeShipping", "Voucher2"],
        ),
        (
            {"field": "USAGE_LIMIT", "direction": "DESC"},
            ["Voucher2", "FreeShipping", "Voucher1"],
        ),
        (
            {"field": "MINIMUM_SPENT_AMOUNT", "direction": "ASC"},
            ["Voucher2", "FreeShipping", "Voucher1"],
        ),
        (
            {"field": "MINIMUM_SPENT_AMOUNT", "direction": "DESC"},
            ["Voucher1", "FreeShipping", "Voucher2"],
        ),
    ],
)
def test_query_vouchers_with_sort(
    voucher_sort, result_order, staff_api_client, permission_manage_discounts
):
    """Each sortable field orders the voucher list as parametrized above."""
    Voucher.objects.bulk_create(
        [
            Voucher(
                name="Voucher1",
                discount_value=123,
                code="abc",
                discount_value_type=DiscountValueType.FIXED,
                type=VoucherType.ENTIRE_ORDER,
                usage_limit=10,
            ),
            Voucher(
                name="Voucher2",
                discount_value=23,
                code="123",
                discount_value_type=DiscountValueType.FIXED,
                type=VoucherType.ENTIRE_ORDER,
                start_date=timezone.now().replace(year=2012, month=1, day=5),
                end_date=timezone.now().replace(year=2013, month=1, day=5),
                min_spent_amount=50,
            ),
            Voucher(
                name="FreeShipping",
                discount_value=100,
                code="xyz",
                discount_value_type=DiscountValueType.PERCENTAGE,
                type=VoucherType.SHIPPING,
                start_date=timezone.now().replace(year=2011, month=1, day=5),
                end_date=timezone.now().replace(year=2015, month=12, day=31),
                usage_limit=1000,
                min_spent_amount=500,
            ),
        ]
    )
    variables = {"sort_by": voucher_sort}
    staff_api_client.user.user_permissions.add(permission_manage_discounts)
    response = staff_api_client.post_graphql(QUERY_VOUCHER_WITH_SORT, variables)
    content = get_graphql_content(response)
    vouchers = content["data"]["vouchers"]["edges"]
    # Result must come back in exactly the parametrized order.
    for order, voucher_name in enumerate(result_order):
        assert vouchers[order]["node"]["name"] == voucher_name
@pytest.mark.parametrize(
    "sale_filter, start_date, end_date, count",
    [
        (
            {"status": "ACTIVE"},
            timezone.now().replace(year=2015, month=1, day=1),
            timezone.now() + timedelta(days=365),
            2,
        ),
        (
            {"status": "EXPIRED"},
            timezone.now().replace(year=2015, month=1, day=1),
            timezone.now().replace(year=2018, month=1, day=1),
            1,
        ),
        (
            {"status": "SCHEDULED"},
            timezone.now() + timedelta(days=3),
            timezone.now() + timedelta(days=10),
            1,
        ),
    ],
)
def test_query_sales_with_filter_status(
    sale_filter,
    start_date,
    end_date,
    count,
    staff_api_client,
    query_sales_with_filter,
    permission_manage_discounts,
):
    """Filtering sales by activity status returns the expected number of edges."""
    # given: one always-active sale plus one whose window comes from the params
    Sale.objects.bulk_create(
        [
            Sale(name="Sale1", value=123, start_date=timezone.now()),
            Sale(name="Sale2", value=123, start_date=start_date, end_date=end_date),
        ]
    )
    # when
    response = staff_api_client.post_graphql(
        query_sales_with_filter,
        {"filter": sale_filter},
        permissions=[permission_manage_discounts],
    )
    # then
    edges = get_graphql_content(response)["data"]["sales"]["edges"]
    assert len(edges) == count
@pytest.mark.parametrize(
    "sale_filter, count, sale_type",
    [
        ({"saleType": "PERCENTAGE"}, 1, DiscountValueType.PERCENTAGE),
        ({"saleType": "FIXED"}, 2, DiscountValueType.FIXED),
    ],
)
def test_query_sales_with_filter_discount_type(
    sale_filter,
    count,
    sale_type,
    staff_api_client,
    query_sales_with_filter,
    permission_manage_discounts,
):
    """Filtering sales by ``saleType`` returns only sales of that discount type."""
    # given: one FIXED sale plus one sale of the parametrized type
    Sale.objects.bulk_create(
        [
            Sale(name="Sale1", value=123, type=DiscountValueType.FIXED),
            Sale(name="Sale2", value=123, type=sale_type),
        ]
    )
    # when
    response = staff_api_client.post_graphql(
        query_sales_with_filter,
        {"filter": sale_filter},
        permissions=[permission_manage_discounts],
    )
    # then
    edges = get_graphql_content(response)["data"]["sales"]["edges"]
    assert len(edges) == count
@pytest.mark.parametrize(
    "sale_filter, count",
    [
        ({"started": {"gte": "2019-04-18T00:00:00+00:00"}}, 1),
        ({"started": {"lte": "2012-01-14T00:00:00+00:00"}}, 1),
        (
            {
                "started": {
                    "lte": "2012-01-15T00:00:00+00:00",
                    "gte": "2012-01-01T00:00:00+00:00",
                }
            },
            1,
        ),
        ({"started": {"gte": "2012-01-03T00:00:00+00:00"}}, 2),
    ],
)
def test_query_sales_with_filter_started(
    sale_filter,
    count,
    staff_api_client,
    query_sales_with_filter,
    permission_manage_discounts,
):
    """Filtering sales by start-date range returns the expected number of edges."""
    # given: one sale started "now" and one started on 2012-01-05
    Sale.objects.bulk_create(
        [
            Sale(name="Sale1", value=123),
            Sale(
                name="Sale2",
                value=123,
                start_date=timezone.now().replace(year=2012, month=1, day=5),
            ),
        ]
    )
    # when
    response = staff_api_client.post_graphql(
        query_sales_with_filter,
        {"filter": sale_filter},
        permissions=[permission_manage_discounts],
    )
    # then
    edges = get_graphql_content(response)["data"]["sales"]["edges"]
    assert len(edges) == count
@pytest.mark.parametrize(
    "sale_filter, count",
    [({"search": "Big"}, 1), ({"search": "69"}, 1), ({"search": "FIX"}, 2)],
)
def test_query_sales_with_filter_search(
    sale_filter,
    count,
    staff_api_client,
    query_sales_with_filter,
    permission_manage_discounts,
):
    """Free-text search matches against sale name, value and type."""
    # given: names, values and types chosen so each search term hits a known subset
    Sale.objects.bulk_create(
        [
            Sale(name="BigSale", value=123, type="PERCENTAGE"),
            Sale(
                name="Sale2",
                value=123,
                type="FIXED",
                start_date=timezone.now().replace(year=2012, month=1, day=5),
            ),
            Sale(
                name="Sale3",
                value=69,
                type="FIXED",
                start_date=timezone.now().replace(year=2012, month=1, day=5),
            ),
        ]
    )
    # when
    response = staff_api_client.post_graphql(
        query_sales_with_filter,
        {"filter": sale_filter},
        permissions=[permission_manage_discounts],
    )
    # then
    edges = get_graphql_content(response)["data"]["sales"]["edges"]
    assert len(edges) == count
QUERY_SALE_WITH_SORT = """
query ($sort_by: SaleSortingInput!) {
sales(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
    "sale_sort, result_order",
    [
        ({"field": "NAME", "direction": "ASC"}, ["BigSale", "Sale2", "Sale3"]),
        ({"field": "NAME", "direction": "DESC"}, ["Sale3", "Sale2", "BigSale"]),
        ({"field": "VALUE", "direction": "ASC"}, ["Sale3", "Sale2", "BigSale"]),
        ({"field": "VALUE", "direction": "DESC"}, ["BigSale", "Sale2", "Sale3"]),
        ({"field": "TYPE", "direction": "ASC"}, ["Sale2", "Sale3", "BigSale"]),
        ({"field": "TYPE", "direction": "DESC"}, ["BigSale", "Sale2", "Sale3"]),
        ({"field": "START_DATE", "direction": "ASC"}, ["Sale3", "Sale2", "BigSale"]),
        ({"field": "START_DATE", "direction": "DESC"}, ["BigSale", "Sale2", "Sale3"]),
        ({"field": "END_DATE", "direction": "ASC"}, ["Sale2", "Sale3", "BigSale"]),
        ({"field": "END_DATE", "direction": "DESC"}, ["BigSale", "Sale3", "Sale2"]),
    ],
)
def test_query_sales_with_sort(
    sale_sort, result_order, staff_api_client, permission_manage_discounts
):
    """Sales are returned in the order requested through ``sortBy``."""
    # given: three sales differing in name, value, type and date window
    Sale.objects.bulk_create(
        [
            Sale(name="BigSale", value=1234, type="PERCENTAGE"),
            Sale(
                name="Sale2",
                value=123,
                type="FIXED",
                start_date=timezone.now().replace(year=2012, month=1, day=5),
                end_date=timezone.now().replace(year=2013, month=1, day=5),
            ),
            Sale(
                name="Sale3",
                value=69,
                type="FIXED",
                start_date=timezone.now().replace(year=2011, month=1, day=5),
                end_date=timezone.now().replace(year=2015, month=12, day=31),
            ),
        ]
    )
    staff_api_client.user.user_permissions.add(permission_manage_discounts)
    # when
    response = staff_api_client.post_graphql(QUERY_SALE_WITH_SORT, {"sort_by": sale_sort})
    # then
    edges = get_graphql_content(response)["data"]["sales"]["edges"]
    returned_names = [edge["node"]["name"] for edge in edges]
    assert returned_names[: len(result_order)] == result_order
| 31.452583 | 88 | 0.578958 |
0ba4d82e10860ff375fc8e13ba320f4ec55c774a | 7,459 | py | Python | src/benchmark/trace_record.py | renkekuhlmann/gams-benchmark | 91a5a5ee5ba97de63fbbc4c2b6f0b25c5897caf6 | [
"MIT"
] | null | null | null | src/benchmark/trace_record.py | renkekuhlmann/gams-benchmark | 91a5a5ee5ba97de63fbbc4c2b6f0b25c5897caf6 | [
"MIT"
] | null | null | null | src/benchmark/trace_record.py | renkekuhlmann/gams-benchmark | 91a5a5ee5ba97de63fbbc4c2b6f0b25c5897caf6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" TraceRecord """
import os
import re
import math
# Canonical set (and column order) of keys in a GAMS trace record.
TRACE_ENTRIES = [
    'InputFileName', 'ModelType', 'SolverName', 'NLP', 'MIP', 'JulianDate',
    'Direction', 'NumberOfEquations', 'NumberOfVariables',
    'NumberOfDiscreteVariables', 'NumberOfNonZeros', 'NumberOfNonlinearNonZeros',
    'OptionFile', 'ModelStatus', 'SolverStatus', 'ObjectiveValue',
    'ObjectiveValueEstimate', 'ETSolver', 'ETSolve', 'ETInterface', 'ETInterfaceOverhead',
    'SolverTime', 'NumberOfIterations', 'NumberOfDomainViolations', 'NumberOfNodes'
]
# Keys whose values stay raw strings when a trace file is parsed.
TRACE_ENTRIES_STRING = [
    'InputFileName', 'ModelType', 'SolverName', 'NLP', 'MIP', 'OptionFile'
]
# Keys coerced to int when parsed (unparseable values become None).
TRACE_ENTRIES_INTEGER = [
    'Direction', 'NumberOfEquations', 'NumberOfVariables',
    'NumberOfDiscreteVariables', 'NumberOfNonZeros', 'NumberOfNonlinearNonZeros',
    'ModelStatus', 'SolverStatus', 'NumberOfIterations', 'NumberOfDomainViolations',
    'NumberOfNodes'
]
# Keys coerced to float when parsed (unparseable values become None).
TRACE_ENTRIES_REAL = [
    'JulianDate', 'ObjectiveValue', 'ObjectiveValueEstimate', 'ETSolver', 'ETSolve',
    'ETInterface', 'ETInterfaceOverhead', 'SolverTime'
]
class TraceRecord:
    """
    Trace Record stores solve attributes that are present in a GAMS trace file.

    The record is a dict keyed by TRACE_ENTRIES; entries not (yet) known are
    None. It can be populated from a listing file (``load_lst``) or a trace
    file (``load_trc``) and written back out in trace-file format (``write``).
    """
    def __init__(self, filename):
        self.record = dict()
        for key in TRACE_ENTRIES:
            self.record[key] = None
        self.record['InputFileName'] = filename
        # some default values necessary for correct paver input
        self.record['Direction'] = 0
        self.record['SolverStatus'] = 13
        self.record['ModelStatus'] = 12
        self.record['SolverTime'] = 0
    def load_lst(self, lstfile):
        """
        Loads solve attributes from a listing file.

        Scans the listing for the solver/model status lines, the
        TYPE/DIRECTION line and the resource-usage line and stores the
        parsed values in ``self.record``. Missing file is a silent no-op.

        Arguments
        ---------
        lstfile: str
            Path to listing file
        """
        if not os.path.exists(lstfile):
            return
        with open(lstfile, 'r') as fio:
            lines = fio.readlines()
        for line in lines:
            # "**** SOLVER STATUS     <n>" -> integer status code
            if re.findall(r"^\*\*\*\* SOLVER STATUS.*[0-9]+", line):
                match = re.findall("[0-9]+", line)[0]
                try:
                    self.record['SolverStatus'] = int(match)
                except ValueError:
                    self.record['SolverStatus'] = None
            # "**** MODEL STATUS      <n>" -> integer status code
            if re.findall(r"^\*\*\*\* MODEL STATUS.*[0-9]+", line):
                match = re.findall("[0-9]+", line)[0]
                try:
                    self.record['ModelStatus'] = int(match)
                except ValueError:
                    self.record['ModelStatus'] = None
            # "TYPE <modeltype> DIRECTION <MINIMIZE|MAXIMIZE>"
            # tmp[1] is the model type, tmp[3] the optimization direction
            if re.findall(r"TYPE.*DIRECTION", line):
                tmp = list(filter(None, line.replace('\n', '').split(" ")))
                self.record['ModelType'] = tmp[1]
                if tmp[3] == "MINIMIZE":
                    self.record['Direction'] = 0
                elif tmp[3] == "MAXIMIZE":
                    self.record['Direction'] = 1
            # "RESOURCE USAGE, LIMIT  <used> <limit>" -> used seconds
            if re.findall(r"RESOURCE USAGE, LIMIT", line):
                match = list(filter(None, line.replace('\n', '').split(" ")))
                try:
                    self.record['SolverTime'] = float(match[3])
                except ValueError:
                    self.record['SolverTime'] = None
    def load_trc(self, trcfile):
        """
        Loads solve attributes from a trace file.

        The '*'-prefixed header block ("Trace Record Definition") declares the
        column names; subsequent data lines are matched positionally against
        those names. Two layouts are supported: comma-separated values on one
        line (traceopt 3) or one value per line, flagged by a trailing ``\\n``
        marker in the header (traceopt 5).

        Arguments
        ---------
        trcfile: str
            Path to trace file
        """
        # pylint: disable=too-many-branches,too-many-statements
        with open(trcfile, 'r') as fio:
            lines = fio.readlines()
        header = list()
        header_read = False
        header_it = iter(header)
        traceopt = 3
        for line in lines:
            # read header
            if line[0] == '*':
                # skip GamsSolve, GamsExit line
                if line.find('GamsSolve') >= 0:
                    continue
                if line.find('GamsExit') >= 0:
                    continue
                if line.find('Trace Record Definition') >= 0:
                    header_read = True
                    continue
                if header_read:
                    # remove '*' and spaces
                    line = line[1:].strip()
                    # empty comment line -> end of header
                    if len(line) == 0:
                        header_read = False
                        header_it = iter(header)
                        continue
                    if line[0] == ',':
                        line = line[1:]
                    if line[-1] == ',':
                        line = line[:-1]
                    # literal "\n" marker at line end -> one-value-per-line layout
                    if line[-2:] == '\\n':
                        traceopt = 5
                        line = line[:-2]
                    # append to trace record definition
                    for key in line.split(','):
                        header.append(key.strip())
                continue
            # get elements
            if traceopt == 3:
                elements = line.split(',')
            elif traceopt == 5:
                elements = [line.replace('\n', '')]
            for element in elements:
                element = element.strip()
                # update iterator
                # NOTE(review): restarting the iterator when the *name* of the
                # last header column is reached assumes header names are
                # unique — confirm this holds for all GAMS trace files.
                current_header = next(header_it)
                if current_header == header[-1]:
                    header_it = iter(header)
                # parse element ("NA"/empty -> None, then optional int/float cast)
                if element == "NA" or len(element) == 0:
                    element = None
                if current_header in TRACE_ENTRIES_INTEGER:
                    try:
                        element = int(element)
                    except (ValueError, TypeError):
                        element = None
                if current_header in TRACE_ENTRIES_REAL:
                    try:
                        element = float(element)
                    except (ValueError, TypeError):
                        element = None
                # store element (unknown columns are silently dropped)
                if current_header in TRACE_ENTRIES:
                    self.record[current_header] = element
    def write_header(self, trcfile):
        """
        Writes trace file definition to trace file (truncates the file).

        Arguments
        ---------
        trcfile: str
            Path to trace file
        """
        with open(trcfile, 'w') as fio:
            fio.write("* Trace Record Definition\n")
            for i, key in enumerate(self.record):
                fio.write("* %s" % key)
                if i < len(self.record)-1:
                    fio.write(",")
                fio.write("\n")
            fio.write("*\n")
    def write_record(self, trcfile):
        """
        Appends the trace record values as one comma-separated line.

        None and NaN values are written as "NA".

        Arguments
        ---------
        trcfile: str
            Path to trace file
        """
        with open(trcfile, 'a') as fio:
            for i, (_, value) in enumerate(self.record.items()):
                if i > 0:
                    fio.write(",")
                if value is None or (isinstance(value, float) and math.isnan(value)):
                    fio.write("NA")
                else:
                    fio.write(str(value))
            fio.write("\n")
    def write(self, trcfile):
        """
        Writes trace file record to trace file incl. trace file definition.

        Arguments
        ---------
        trcfile: str
            Path to trace file
        """
        self.write_header(trcfile)
        self.write_record(trcfile)
| 32.290043 | 90 | 0.486124 |
04b4dbcf978b1e582f5aceb45d2cf98bbf331ec6 | 2,482 | py | Python | tests/sentry/models/test_event.py | JannKleen/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 1 | 2019-02-27T15:13:06.000Z | 2019-02-27T15:13:06.000Z | tests/sentry/models/test_event.py | rmax/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 5 | 2020-07-17T11:20:41.000Z | 2021-05-09T12:16:53.000Z | tests/sentry/models/test_event.py | zaasmi/codeerrorhelp | 1ab8d3e314386b9b2d58dad9df45355bf6014ac9 | [
"BSD-3-Clause"
] | 2 | 2021-01-26T09:53:39.000Z | 2022-03-22T09:01:47.000Z | from __future__ import absolute_import
from sentry.testutils import TestCase
class EventTest(TestCase):
def test_legacy_tags(self):
event = self.create_event(
data={'tags': [
('logger', 'foobar'),
('site', 'foo'),
('server_name', 'bar'),
]}
)
assert event.logger == 'foobar'
assert event.level == event.group.level
assert event.site == 'foo'
assert event.server_name == 'bar'
assert event.culprit == event.group.culprit
def test_email_subject(self):
event1 = self.create_event(
event_id='a' * 32, group=self.group, tags={'level': 'info'}, message='Foo bar'
)
event2 = self.create_event(
event_id='b' * 32, group=self.group, tags={'level': 'ERROR'}, message='Foo bar'
)
self.group.level = 30
assert event1.get_email_subject() == '[foo Bar] info: Foo bar'
assert event2.get_email_subject() == '[foo Bar] ERROR: Foo bar'
def test_email_subject_with_template(self):
self.project.update_option(
'mail:subject_template',
'$project ${tag:environment}@${tag:release} $$ $title ${tag:invalid} $invalid'
)
event1 = self.create_event(
event_id='a' * 32,
group=self.group,
tags={'level': 'info',
'environment': 'production',
'sentry:release': '0'},
message='baz',
)
assert event1.get_email_subject() == 'foo Bar production@0 $ baz ${tag:invalid} $invalid'
class EventGetLegacyMessageTest(TestCase):
    """``Event.get_legacy_message`` prefers the message interface over the raw message."""

    def test_message(self):
        # No message interface present: the plain message is returned as-is.
        created = self.create_event(message='foo bar')
        assert created.get_legacy_message() == 'foo bar'

    def test_message_interface(self):
        # The interface's 'message' wins over the top-level message.
        interface_data = {'sentry.interfaces.Message': {'message': 'foo bar'}}
        created = self.create_event(message='biz baz', data=interface_data)
        assert created.get_legacy_message() == 'foo bar'

    def test_message_interface_with_formatting(self):
        # A pre-formatted string takes precedence over the raw format string.
        interface_data = {
            'sentry.interfaces.Message': {
                'message': 'foo %s',
                'formatted': 'foo bar',
                'params': ['bar'],
            }
        }
        created = self.create_event(message='biz baz', data=interface_data)
        assert created.get_legacy_message() == 'foo bar'
6493a3e0dba384602bd4c28c6e965bc2c0e9c85b | 21,581 | py | Python | stable_baselines3/common/logger.py | martinwimpff/stable-baselines3-hmlf | 6ffa947760f8c42a09f9ae42aa7485c608efc42d | [
"MIT"
] | null | null | null | stable_baselines3/common/logger.py | martinwimpff/stable-baselines3-hmlf | 6ffa947760f8c42a09f9ae42aa7485c608efc42d | [
"MIT"
] | null | null | null | stable_baselines3/common/logger.py | martinwimpff/stable-baselines3-hmlf | 6ffa947760f8c42a09f9ae42aa7485c608efc42d | [
"MIT"
] | null | null | null | import datetime
import json
import os
import sys
import tempfile
import warnings
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, TextIO, Tuple, Union
import numpy as np
import pandas
import torch as th
from matplotlib import pyplot as plt
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
SummaryWriter = None
# Logging levels, numerically compatible with the stdlib ``logging`` module;
# DISABLED suppresses all output.
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class Video(object):
    """
    Video data class storing the video frames and the frame per seconds

    :param frames: frames to create the video from
    :param fps: frames per second
    """

    def __init__(self, frames: th.Tensor, fps: Union[float, int]):
        # Plain value container: no validation, attributes mirror the arguments.
        self.frames, self.fps = frames, fps
class Figure(object):
    """
    Figure data class storing a matplotlib figure and whether to close the figure after logging it

    :param figure: figure to log
    :param close: if true, close the figure after logging it
    """

    def __init__(self, figure: plt.figure, close: bool):
        # Plain value container: no validation, attributes mirror the arguments.
        self.figure, self.close = figure, close
class Image(object):
    """
    Image data class storing an image and data format

    :param image: image to log
    :param dataformats: Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc.
        More info in add_image method doc at https://pytorch.org/docs/stable/tensorboard.html
        Gym envs normally use 'HWC' (channel last)
    """

    def __init__(self, image: Union[th.Tensor, np.ndarray, str], dataformats: str):
        # Plain value container: no validation, attributes mirror the arguments.
        self.image, self.dataformats = image, dataformats
class FormatUnsupportedError(NotImplementedError):
    """
    Custom error to display informative message when
    a value is not supported by some formats.

    :param unsupported_formats: A sequence of unsupported formats,
        for instance ``["stdout"]``.
    :param value_description: Description of the value that cannot be logged by this format.
    """

    def __init__(self, unsupported_formats: Sequence[str], value_description: str):
        # Singular vs plural phrasing depending on how many formats reject the value.
        format_str = (
            f"formats {', '.join(unsupported_formats)} are"
            if len(unsupported_formats) > 1
            else f"format {unsupported_formats[0]} is"
        )
        super().__init__(
            f"The {format_str} not supported for the {value_description} value logged.\n"
            f"You can exclude formats via the `exclude` parameter of the logger's `record` function."
        )
class KVWriter(object):
    """
    Key Value writer: abstract base for sinks that consume a dict per dump.
    """

    def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
        """
        Write a dictionary to file

        :param key_values: the diagnostics to write, keyed by name
        :param key_excluded: per-key output formats that must skip the value
        :param step: the current timestep, forwarded to formats that use it
        """
        raise NotImplementedError

    def close(self) -> None:
        """
        Close owned resources
        """
        raise NotImplementedError
class SeqWriter(object):
    """
    sequence writer: abstract base for sinks that consume plain text sequences.
    """

    def write_sequence(self, sequence: List) -> None:
        """
        write_sequence an array to file

        :param sequence: the strings to write, in order
        """
        raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
    """A human-readable output format producing ASCII tables of key-value pairs.

    Set attribute ``max_length`` to change the maximum length of keys and values
    to write to output (or specify it when calling ``__init__``).

    :param filename_or_file: the file to write the log to
    :param max_length: the maximum length of keys and values to write to output.
        Outputs longer than this will be truncated. An error will be raised
        if multiple keys are truncated to the same value. The maximum output
        width will be ``2*max_length + 7``. The default of 36 produces output
        no longer than 79 characters wide.
    """

    def __init__(self, filename_or_file: Union[str, TextIO], max_length: int = 36):
        self.max_length = max_length
        if isinstance(filename_or_file, str):
            # A path was given: we own the handle and must close it in close().
            self.file = open(filename_or_file, "wt")
            self.own_file = True
        else:
            assert hasattr(filename_or_file, "write"), f"Expected file or str, got {filename_or_file}"
            self.file = filename_or_file
            self.own_file = False

    def write(self, key_values: Dict, key_excluded: Dict, step: int = 0) -> None:
        # Create strings for printing
        key2str = {}
        tag = None
        # NOTE(review): zipping the two *sorted* dicts assumes key_values and
        # key_excluded contain exactly the same keys — confirm Logger.dump
        # guarantees this invariant.
        for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items())):
            if excluded is not None and ("stdout" in excluded or "log" in excluded):
                continue

            elif isinstance(value, Video):
                raise FormatUnsupportedError(["stdout", "log"], "video")

            elif isinstance(value, Figure):
                raise FormatUnsupportedError(["stdout", "log"], "figure")

            elif isinstance(value, Image):
                raise FormatUnsupportedError(["stdout", "log"], "image")

            elif isinstance(value, float):
                # Align left
                value_str = f"{value:<8.3g}"
            else:
                value_str = str(value)

            if key.find("/") > 0:  # Find tag and add it to the dict
                # "rollout/ep_len" introduces a "rollout/" section header row
                tag = key[: key.find("/") + 1]
                key2str[self._truncate(tag)] = ""
            # Remove tag from key
            if tag is not None and tag in key:
                key = str(" " + key[len(tag) :])

            truncated_key = self._truncate(key)
            if truncated_key in key2str:
                # Two distinct keys collapsing to one truncated label would
                # silently drop a row, so fail loudly instead.
                raise ValueError(
                    f"Key '{key}' truncated to '{truncated_key}' that already exists. Consider increasing `max_length`."
                )
            key2str[truncated_key] = self._truncate(value_str)

        # Find max widths
        if len(key2str) == 0:
            warnings.warn("Tried to write empty key-value dict")
            return
        else:
            key_width = max(map(len, key2str.keys()))
            val_width = max(map(len, key2str.values()))

        # Write out the data
        dashes = "-" * (key_width + val_width + 7)
        lines = [dashes]
        for key, value in key2str.items():
            key_space = " " * (key_width - len(key))
            val_space = " " * (val_width - len(value))
            lines.append(f"| {key}{key_space} | {value}{val_space} |")
        lines.append(dashes)
        self.file.write("\n".join(lines) + "\n")

        # Flush the output to the file
        self.file.flush()

    def _truncate(self, string: str) -> str:
        # Truncation keeps the prefix and marks the cut with an ellipsis.
        if len(string) > self.max_length:
            string = string[: self.max_length - 3] + "..."
        return string

    def write_sequence(self, sequence: List) -> None:
        sequence = list(sequence)
        for i, elem in enumerate(sequence):
            self.file.write(elem)
            if i < len(sequence) - 1:  # add space unless this is the last one
                self.file.write(" ")
        self.file.write("\n")
        self.file.flush()

    def close(self) -> None:
        """
        closes the file (only if this writer opened it itself)
        """
        if self.own_file:
            self.file.close()
def filter_excluded_keys(
    key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], _format: str
) -> Dict[str, Any]:
    """
    Filters the keys specified by ``key_exclude`` for the specified format

    :param key_values: log dictionary to be filtered
    :param key_excluded: keys to be excluded per format
    :param _format: format for which this filter is run
    :return: dict without the excluded keys
    """
    filtered = {}
    for key, value in key_values.items():
        exclusions = key_excluded.get(key)
        # A key is dropped only when its exclusion list mentions this format.
        if exclusions is not None and _format in exclusions:
            continue
        filtered[key] = value
    return filtered
class JSONOutputFormat(KVWriter):
    def __init__(self, filename: str):
        """
        log to a file, in the JSON format (one JSON object per line)

        :param filename: the file to write the log to
        """
        self.file = open(filename, "wt")

    def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
        def cast_to_json_serializable(value: Any):
            # Rich media types have no JSON representation; refuse loudly.
            if isinstance(value, Video):
                raise FormatUnsupportedError(["json"], "video")
            if isinstance(value, Figure):
                raise FormatUnsupportedError(["json"], "figure")
            if isinstance(value, Image):
                raise FormatUnsupportedError(["json"], "image")
            if hasattr(value, "dtype"):
                if value.shape == () or len(value) == 1:
                    # if value is a dimensionless numpy array or of length 1, serialize as a float
                    return float(value)
                else:
                    # otherwise, a value is a numpy array, serialize as a list or nested lists
                    return value.tolist()
            return value

        key_values = {
            key: cast_to_json_serializable(value)
            for key, value in filter_excluded_keys(key_values, key_excluded, "json").items()
        }
        # One JSON-lines record per dump; flush so the file can be tailed live.
        self.file.write(json.dumps(key_values) + "\n")
        self.file.flush()

    def close(self) -> None:
        """
        closes the file
        """
        self.file.close()
class CSVOutputFormat(KVWriter):
    def __init__(self, filename: str):
        """
        log to a file, in a CSV format

        :param filename: the file to write the log to
        """
        # "w+t" because write() re-reads the file to retrofit new columns.
        self.file = open(filename, "w+t")
        self.keys = []
        self.separator = ","
        self.quotechar = '"'

    def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
        # Add our current row to the history
        key_values = filter_excluded_keys(key_values, key_excluded, "csv")
        extra_keys = key_values.keys() - self.keys
        if extra_keys:
            # A previously unseen key appeared: rewrite the whole file with
            # the extended header and pad the existing rows with empty cells.
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, key) in enumerate(self.keys):
                if i > 0:
                    self.file.write(",")
                self.file.write(key)
            self.file.write("\n")
            for line in lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.separator * len(extra_keys))
                self.file.write("\n")
        for i, key in enumerate(self.keys):
            if i > 0:
                self.file.write(",")
            value = key_values.get(key)

            if isinstance(value, Video):
                raise FormatUnsupportedError(["csv"], "video")

            elif isinstance(value, Figure):
                raise FormatUnsupportedError(["csv"], "figure")

            elif isinstance(value, Image):
                raise FormatUnsupportedError(["csv"], "image")

            elif isinstance(value, str):
                # escape quotechars by prepending them with another quotechar
                value = value.replace(self.quotechar, self.quotechar + self.quotechar)
                # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers
                self.file.write(self.quotechar + value + self.quotechar)
            elif value is not None:
                self.file.write(str(value))
        self.file.write("\n")
        self.file.flush()

    def close(self) -> None:
        """
        closes the file
        """
        self.file.close()
class TensorBoardOutputFormat(KVWriter):
    def __init__(self, folder: str):
        """
        Dumps key/value pairs into TensorBoard's numeric format.

        :param folder: the folder to write the log to
        """
        assert SummaryWriter is not None, "tensorboard is not installed, you can use " "pip install tensorboard to do so"
        self.writer = SummaryWriter(log_dir=folder)

    def write(self, key_values: Dict[str, Any], key_excluded: Dict[str, Union[str, Tuple[str, ...]]], step: int = 0) -> None:
        # NOTE(review): like HumanOutputFormat, the zip of the two sorted
        # dicts assumes both share exactly the same keys.
        for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items())):

            if excluded is not None and "tensorboard" in excluded:
                continue

            # Dispatch on the value's type to the matching SummaryWriter call.
            if isinstance(value, np.ScalarType):
                if isinstance(value, str):
                    # str is considered a np.ScalarType
                    self.writer.add_text(key, value, step)
                else:
                    self.writer.add_scalar(key, value, step)

            if isinstance(value, th.Tensor):
                self.writer.add_histogram(key, value, step)

            if isinstance(value, Video):
                self.writer.add_video(key, value.frames, step, value.fps)

            if isinstance(value, Figure):
                self.writer.add_figure(key, value.figure, step, close=value.close)

            if isinstance(value, Image):
                self.writer.add_image(key, value.image, step, dataformats=value.dataformats)

        # Flush the output to the file
        self.writer.flush()

    def close(self) -> None:
        """
        closes the file
        """
        if self.writer:
            self.writer.close()
            self.writer = None
def make_output_format(_format: str, log_dir: str, log_suffix: str = "") -> KVWriter:
    """
    return a logger for the requested format

    :param _format: the requested format to log to ('stdout', 'log', 'json' or 'csv' or 'tensorboard')
    :param log_dir: the logging directory
    :param log_suffix: the suffix for the log file
    :return: the logger
    """
    os.makedirs(log_dir, exist_ok=True)
    # Guard-clause dispatch: each recognized format returns immediately.
    if _format == "stdout":
        return HumanOutputFormat(sys.stdout)
    if _format == "log":
        return HumanOutputFormat(os.path.join(log_dir, f"log{log_suffix}.txt"))
    if _format == "json":
        return JSONOutputFormat(os.path.join(log_dir, f"progress{log_suffix}.json"))
    if _format == "csv":
        return CSVOutputFormat(os.path.join(log_dir, f"progress{log_suffix}.csv"))
    if _format == "tensorboard":
        return TensorBoardOutputFormat(log_dir)
    raise ValueError(f"Unknown format specified: {_format}")
# ================================================================
# Backend
# ================================================================
class Logger(object):
    """
    The logger class: accumulates key/value diagnostics and fans each
    ``dump`` out to every configured output format.

    :param folder: the logging location
    :param output_formats: the list of output formats
    """

    def __init__(self, folder: Optional[str], output_formats: List[KVWriter]):
        self.name_to_value = defaultdict(float)  # values this iteration
        self.name_to_count = defaultdict(int)  # sample counts for record_mean
        self.name_to_excluded = defaultdict(str)  # per-key excluded formats
        self.level = INFO
        self.dir = folder
        self.output_formats = output_formats

    def record(self, key: str, value: Any, exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
        """
        Log a value of some diagnostic
        Call this once for each diagnostic quantity, each iteration
        If called many times, last value will be used.

        :param key: save to log this key
        :param value: save to log this value
        :param exclude: outputs to be excluded
        """
        self.name_to_value[key] = value
        self.name_to_excluded[key] = exclude

    def record_mean(self, key: str, value: Any, exclude: Optional[Union[str, Tuple[str, ...]]] = None) -> None:
        """
        The same as record(), but if called many times, values averaged.

        :param key: save to log this key
        :param value: save to log this value
        :param exclude: outputs to be excluded
        """
        if value is None:
            self.name_to_value[key] = None
            return
        old_val, count = self.name_to_value[key], self.name_to_count[key]
        # incremental (running) mean: avoids storing all samples
        self.name_to_value[key] = old_val * count / (count + 1) + value / (count + 1)
        self.name_to_count[key] = count + 1
        self.name_to_excluded[key] = exclude

    def dump(self, step: int = 0) -> None:
        """
        Write all of the diagnostics from the current iteration
        """
        if self.level == DISABLED:
            return
        for _format in self.output_formats:
            if isinstance(_format, KVWriter):
                _format.write(self.name_to_value, self.name_to_excluded, step)

        # buffers are cleared so each dump covers exactly one iteration
        self.name_to_value.clear()
        self.name_to_count.clear()
        self.name_to_excluded.clear()

    def log(self, *args, level: int = INFO) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).

        level: int. (see logger.py docs) If the global logger level is higher than
                    the level argument here, don't print to stdout.

        :param args: log the arguments
        :param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
        """
        if self.level <= level:
            self._do_log(args)

    def debug(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the DEBUG level.

        :param args: log the arguments
        """
        self.log(*args, level=DEBUG)

    def info(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the INFO level.

        :param args: log the arguments
        """
        self.log(*args, level=INFO)

    def warn(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the WARN level.

        :param args: log the arguments
        """
        self.log(*args, level=WARN)

    def error(self, *args) -> None:
        """
        Write the sequence of args, with no separators,
        to the console and output files (if you've configured an output file).
        Using the ERROR level.

        :param args: log the arguments
        """
        self.log(*args, level=ERROR)

    # Configuration
    # ----------------------------------------
    def set_level(self, level: int) -> None:
        """
        Set logging threshold on current logger.

        :param level: the logging level (can be DEBUG=10, INFO=20, WARN=30, ERROR=40, DISABLED=50)
        """
        self.level = level

    def get_dir(self) -> str:
        """
        Get directory that log files are being written to.
        will be None if there is no output directory (i.e., if you didn't call start)

        :return: the logging directory
        """
        return self.dir

    def close(self) -> None:
        """
        closes the file
        """
        for _format in self.output_formats:
            _format.close()

    # Misc
    # ----------------------------------------
    def _do_log(self, args) -> None:
        """
        log to the requested format outputs (text sinks only)

        :param args: the arguments to log
        """
        for _format in self.output_formats:
            if isinstance(_format, SeqWriter):
                _format.write_sequence(map(str, args))
def configure(folder: Optional[str] = None, format_strings: Optional[List[str]] = None) -> Logger:
    """
    Configure the current logger.

    :param folder: the save location
        (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])
    :param format_strings: the output logging format
        (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])
    :return: The logger object.
    """
    # folder precedence: explicit argument > SB3_LOGDIR env var > temp dir
    if folder is None:
        folder = os.getenv("SB3_LOGDIR")
    if folder is None:
        folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime("SB3-%Y-%m-%d-%H-%M-%S-%f"))
    assert isinstance(folder, str)
    os.makedirs(folder, exist_ok=True)

    log_suffix = ""
    # format precedence: explicit argument > SB3_LOG_FORMAT env var > default
    if format_strings is None:
        format_strings = os.getenv("SB3_LOG_FORMAT", "stdout,log,csv").split(",")

    format_strings = list(filter(None, format_strings))
    output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]

    logger = Logger(folder=folder, output_formats=output_formats)
    # Only print when some files will be saved
    if len(format_strings) > 0 and format_strings != ["stdout"]:
        logger.log(f"Logging to {folder}")
    return logger
# ================================================================
# Readers
# ================================================================
def read_json(filename: str) -> pandas.DataFrame:
    """
    read a json file using pandas

    The file is expected in JSON-lines layout (one JSON object per line),
    as produced by ``JSONOutputFormat``.

    :param filename: the file path to read
    :return: the data in the json
    """
    with open(filename, "rt") as file_handler:
        records = [json.loads(line) for line in file_handler]
    return pandas.DataFrame(records)
def read_csv(filename: str) -> pandas.DataFrame:
    """
    read a csv file using pandas

    Lines starting with ``#`` are treated as comments and no column is used
    as the index, matching the layout written by ``CSVOutputFormat``.

    :param filename: the file path to read
    :return: the data in the csv
    """
    return pandas.read_csv(filename, index_col=None, comment="#")
| 33.720313 | 125 | 0.589268 |
3547cdf50c35971804118408397e69797527a4c6 | 1,459 | py | Python | esphome/components/tx20/sensor.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | esphome/components/tx20/sensor.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | esphome/components/tx20/sensor.py | huhuhugo1/esphome | eb895d2095861a4d51f1a5fcd582a97389c27b4f | [
"MIT"
] | null | null | null | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import sensor
from esphome.const import CONF_ID, CONF_WIND_SPEED, CONF_PIN, \
CONF_WIND_DIRECTION_DEGREES, UNIT_KILOMETER_PER_HOUR, \
ICON_WEATHER_WINDY, ICON_SIGN_DIRECTION, UNIT_DEGREES
# C++ namespace and component class generated for the TX20 wind sensor.
tx20_ns = cg.esphome_ns.namespace('tx20')
Tx20Component = tx20_ns.class_('Tx20Component', cg.Component)

# Validation schema for the YAML configuration of this sensor platform:
# optional wind-speed and wind-direction sub-sensors plus a required
# input pin that must support interrupts (the TX20 protocol is edge-timed).
CONFIG_SCHEMA = cv.Schema({
    cv.GenerateID(): cv.declare_id(Tx20Component),
    cv.Optional(CONF_WIND_SPEED):
        sensor.sensor_schema(UNIT_KILOMETER_PER_HOUR, ICON_WEATHER_WINDY, 1),
    cv.Optional(CONF_WIND_DIRECTION_DEGREES):
        sensor.sensor_schema(UNIT_DEGREES, ICON_SIGN_DIRECTION, 1),
    cv.Required(CONF_PIN): cv.All(pins.internal_gpio_input_pin_schema,
                                  pins.validate_has_interrupt),
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
    """Generate the C++ setup code for a validated TX20 configuration.

    This is an esphome codegen coroutine: ``yield`` suspends until the
    referenced expression (component registration, sensor creation, pin
    expression) has been resolved by the code generator.
    """
    var = cg.new_Pvariable(config[CONF_ID])
    yield cg.register_component(var, config)

    # Only attach the sub-sensors the user actually configured.
    if CONF_WIND_SPEED in config:
        conf = config[CONF_WIND_SPEED]
        sens = yield sensor.new_sensor(conf)
        cg.add(var.set_wind_speed_sensor(sens))
    if CONF_WIND_DIRECTION_DEGREES in config:
        conf = config[CONF_WIND_DIRECTION_DEGREES]
        sens = yield sensor.new_sensor(conf)
        cg.add(var.set_wind_direction_degrees_sensor(sens))
    pin = yield cg.gpio_pin_expression(config[CONF_PIN])
    cg.add(var.set_pin(pin))
5cc3315cdfe7907704228d6c8e5898cdb8d26ea4 | 1,541 | py | Python | Python Programs/step-numbers-from-1-to-n.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | 2 | 2021-06-26T21:50:59.000Z | 2021-09-18T04:55:51.000Z | Python Programs/step-numbers-from-1-to-n.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | Python Programs/step-numbers-from-1-to-n.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | Step Numbers from 1 to N
The program must accept an integer N as the input. The program must print all the step numbers from 1 to N as the output. If there is no such integer then the program must print -1 as the output. A number is called a step number if all the adjacent digits have an absolute difference of 1.
Note: The number of digits in a step number is always greater than 1.
Input Format:
The first line contains N.
Output Format:
The first line contains the integer value(s) or -1.
Example Input/Output 1:
Input:
12
Output:
10 12
Explanation:
The absolute difference between the adjacent digits in the integer 10 is 1 (|1 - 0| = 1).
The absolute difference between the adjacent digits in the integer 12 is 1 (|1 - 2| = 1).
So the step numbers from 1 to 12 are 10 and 12.
Hence the output is 10 12
Example Input/Output 2:
Input:
5
Output:
-1
Example Input/Output 3:
Input:
130
Output:
10 12 21 23 32 34 43 45 54 56 65 67 76 78 87 89 98 101 121 123
C:
#include<stdio.h>
#include <stdlib.h>

/* Return 1 if n is a "step number": every pair of adjacent digits
 * differs by exactly 1 in absolute value. Return 0 otherwise. */
int fun(int n){
    /* Compare the last two digits, then drop the last digit, until
     * only one digit remains. */
    while(n/10){
        if(abs(n%10 - (n/10)%10 )!=1)return 0;
        n=n/10;
    }
    return 1;
}

/* Read N and print all step numbers in [10, N] separated by spaces;
 * print -1 if there are none (step numbers have at least two digits). */
int main()
{
    int n;
    scanf("%d",&n);
    int i=10,c=0;   /* c flags whether any step number was printed */
    while(i<=n){
        if(fun(i)){
            printf("%d ",i);
            c=1;
        }
        i++;
    }
    if(c==0)printf("-1");
}
Python:
# Read N and print every "step number" in [10, N] — a number whose
# adjacent digits always differ by exactly 1.  Print -1 if none exist.
upper = int(input())
found = False
for num in range(10, upper + 1):
    value, is_step = num, True
    # Check adjacent digit pairs from the right, dropping one digit at a time.
    while value > 9:
        if abs(value % 10 - (value // 10) % 10) != 1:
            is_step = False
            break
        value //= 10
    if is_step:
        print(num, end=' ')
        found = True
if not found:
    print(-1)
| 23.707692 | 289 | 0.621674 |
07b490f74faa19233d1ff227771939dab4b60f9b | 7,060 | py | Python | django_countries/__init__.py | toofishes/django-countries | 5c4745d80c294aed121bf532429cbcd08bf5031f | [
"MIT"
] | null | null | null | django_countries/__init__.py | toofishes/django-countries | 5c4745d80c294aed121bf532429cbcd08bf5031f | [
"MIT"
] | null | null | null | django_countries/__init__.py | toofishes/django-countries | 5c4745d80c294aed121bf532429cbcd08bf5031f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import unicode_literals
from itertools import islice
from django_countries.conf import settings
from django.utils.encoding import force_text
try:
    import pyuca
except ImportError:
    pyuca = None


# Use UCA sorting if it's available.
# PEP 8 (E731): use ``def`` instead of assigning a lambda to a name —
# same behaviour, but better tracebacks and a real __name__.
if pyuca:
    collator = pyuca.Collator()

    def sort_key(item):
        """Sort key for a ``(code, name)`` pair using the Unicode
        Collation Algorithm on the country name."""
        return collator.sort_key(item[1])
else:
    import unicodedata

    # Cheap and dirty method to sort against ASCII characters only.
    def sort_key(item):
        """Sort key for a ``(code, name)`` pair using an ASCII-only
        approximation of the country name (accents stripped)."""
        return (
            unicodedata.normalize('NFKD', item[1])
            .encode('ascii', 'ignore').decode('ascii'))
class Countries(object):
    """
    An object containing a list of ISO3166-1 countries.

    Iterating this object will return the countries as tuples (of the country
    code and name), sorted by name.
    """

    @property
    def countries(self):
        """
        Return the a dictionary of countries, modified by any overriding
        settings.

        The result is cached so future lookups are less work intensive.
        """
        # Lazily build the cache; as a side effect this also populates
        # ``self.countries_first`` (normalized COUNTRIES_FIRST codes).
        if not hasattr(self, '_countries'):
            if settings.COUNTRIES_ONLY:
                self._countries = dict(settings.COUNTRIES_ONLY)
            else:
                # Local import so that countries aren't loaded into memory
                # until first used.
                from django_countries.data import COUNTRIES, COMMON_NAMES
                self._countries = dict(COUNTRIES)
                if settings.COUNTRIES_COMMON_NAMES:
                    self._countries.update(COMMON_NAMES)
                if settings.COUNTRIES_OVERRIDE:
                    self._countries.update(settings.COUNTRIES_OVERRIDE)
                    # An override value of ``None`` removes the country.
                    self._countries = dict(
                        (code, name) for code, name in self._countries.items()
                        if name is not None)
            # Keep only COUNTRIES_FIRST entries that resolve to a known
            # alpha-2 code present in the country dictionary.
            self.countries_first = []
            for code in settings.COUNTRIES_FIRST:
                code = self.alpha2(code)
                if code in self._countries:
                    self.countries_first.append(code)
        return self._countries

    @property
    def alt_codes(self):
        # Lazily loaded mapping: alpha-2 code -> (alpha-3 code, numeric code).
        if not hasattr(self, '_alt_codes'):
            # Again, local import so data is not loaded unless it's needed.
            from django_countries.data import ALT_CODES
            self._alt_codes = ALT_CODES
        return self._alt_codes

    @countries.deleter
    def countries(self):
        """
        Reset the countries cache in case for some crazy reason the settings
        change. But surely no one is crazy enough to do that, right?
        """
        if hasattr(self, '_countries'):
            del self._countries

    def __iter__(self):
        """
        Iterate through countries, sorted by name.

        Each country record consists of a tuple of the two letter ISO3166-1
        country code and short name.

        The sorting happens based on the thread's current translation.

        Countries that are in ``settings.COUNTRIES_FIRST`` will be displayed
        before any sorted countries (in the order provided), and are only
        repeated in the sorted list if ``settings.COUNTRIES_FIRST_REPEAT`` is
        ``True``.

        The first countries can be separated from the sorted list by the value
        provided in ``settings.COUNTRIES_FIRST_BREAK``.
        """
        # NOTE(review): ``self.countries_first`` is only created by the
        # ``countries`` property getter — this appears to assume that
        # property has been accessed at least once before iteration
        # (e.g. via ``len()``); confirm against callers.
        # Yield countries that should be displayed first.
        for code in self.countries_first:
            yield (code, force_text(self.countries[code]))

        if (self.countries_first and settings.COUNTRIES_FIRST_BREAK):
            yield ('', force_text(settings.COUNTRIES_FIRST_BREAK))

        # Force translation before sorting.
        countries = [
            (code, force_text(name)) for code, name in self.countries.items()
            if settings.COUNTRIES_FIRST_REPEAT
            or code not in self.countries_first]

        # Return sorted country list.
        for item in sorted(countries, key=sort_key):
            yield item

    def alpha2(self, code):
        """
        Return the two letter country code when passed any type of ISO 3166-1
        country code.

        If no match is found, returns an empty string.
        """
        code = force_text(code).upper()
        # Decide how to look up the code: numeric, alpha-3, or already alpha-2.
        if code.isdigit():
            lookup_code = int(code)
            find = lambda alt_codes: alt_codes[1] == lookup_code
        elif len(code) == 3:
            lookup_code = code
            find = lambda alt_codes: alt_codes[0] == lookup_code
        else:
            find = None
        if find:
            # Linear scan of the alt-codes table for a matching alpha-2 key.
            code = None
            for alpha2, alt_codes in self.alt_codes.items():
                if find(alt_codes):
                    code = alpha2
                    break
        if code in self.countries:
            return code
        return ''

    def name(self, code):
        """
        Return the name of a country, based on the code.

        If no match is found, returns an empty string.
        """
        code = self.alpha2(code)
        return self.countries.get(code, '')

    def alpha3(self, code):
        """
        Return the ISO 3166-1 three letter country code matching the provided
        country code.

        If no match is found, returns an empty string.
        """
        code = self.alpha2(code)
        try:
            return self.alt_codes[code][0]
        except KeyError:
            return ''

    def numeric(self, code, padded=False):
        """
        Return the ISO 3166-1 numeric country code matching the provided
        country code.

        If no match is found, returns ``None``.

        :param padded: Pass ``True`` to return a 0-padded three character
            string, otherwise an integer will be returned.
        """
        code = self.alpha2(code)
        try:
            num = self.alt_codes[code][1]
        except KeyError:
            return None
        if padded:
            return '%03d' % num
        return num

    def __len__(self):
        """
        len() used by several third party applications to calculate the length
        of choices. This will solve a bug related to generating fixtures.
        """
        count = len(self.countries)
        # Add first countries, and the break if necessary.
        count += len(self.countries_first)
        if self.countries_first and settings.COUNTRIES_FIRST_BREAK:
            count += 1
        return count

    def __bool__(self):
        return bool(self.countries)

    # Python 2 truthiness hook aliased to the Python 3 implementation.
    __nonzero__ = __bool__

    def __contains__(self, code):
        """
        Check to see if the countries contains the given code.
        """
        return code in self.countries

    def __getitem__(self, index):
        """
        Some applications expect to be able to access members of the field
        choices by index.
        """
        # Supports both a single integer index and a slice object.
        try:
            return next(islice(self.__iter__(), index, index+1))
        except TypeError:
            return list(islice(self.__iter__(), index.start, index.stop,
                               index.step))


# Shared module-level singleton used by the rest of the package.
countries = Countries()
| 32.685185 | 78 | 0.596034 |
cd57a3d31ec50f7b18f6f73eea9a565817927311 | 1,081 | py | Python | scripts/leg_init_position.py | open-dynamic-robot-initiative/python_blmc | 8b8e9d73469098437ed43f237f65be74b2c99060 | [
"BSD-3-Clause"
] | null | null | null | scripts/leg_init_position.py | open-dynamic-robot-initiative/python_blmc | 8b8e9d73469098437ed43f237f65be74b2c99060 | [
"BSD-3-Clause"
] | null | null | null | scripts/leg_init_position.py | open-dynamic-robot-initiative/python_blmc | 8b8e9d73469098437ed43f237f65be74b2c99060 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Demo on how to initialize the zero position at start up.
Runs the position initialization procedure and starts to print joint positions
when done.
"""
from __future__ import print_function
import can
import time
import blmc.motor_data as md
import blmc.can_helper as ch
from blmc.helper import get_time
# CAN bus bitrate in bit/s (1 Mbit/s).
BITRATE = 1e6

if __name__ == "__main__":
    mtr_data = md.MotorData()
    bus = can.interface.Bus(bitrate=BITRATE)

    # Timestamp of the last console print, used to rate-limit output to ~1 Hz.
    last_print = 0

    # We need the position data for this to work
    ch.send_command(bus, ch.Command.send_position, 1)

    # Initialize the position
    md.init_position_offset(bus, mtr_data)
    time.sleep(1)

    # Print position data to validate initialization
    for msg in bus:
        t = get_time()
        if msg.arbitration_id == md.ArbitrationIds.position:
            mtr_data.set_position(msg)
            # Print at most once per second.
            if last_print < t - 1:
                last_print = t
                print("pos1: {}, pos2: {}".format(
                    mtr_data.mtr1.position.value,
                    mtr_data.mtr2.position.value))
| 25.738095 | 78 | 0.6605 |
f5f9d24be73a9a8e2a967ba25e47ef2a0274f323 | 4,701 | py | Python | django_any/test.py | ixc/django-whatever | 8889321bb935aec7b7747def4768819b1623f832 | [
"MIT"
] | 1 | 2015-02-21T11:49:33.000Z | 2015-02-21T11:49:33.000Z | django_any/test.py | ixc/django-whatever | 8889321bb935aec7b7747def4768819b1623f832 | [
"MIT"
] | null | null | null | django_any/test.py | ixc/django-whatever | 8889321bb935aec7b7747def4768819b1623f832 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time, random
try:
from unittest import _strclass
except ImportError:
_strclass = str
from django import forms
from django_any import any_form
from django.test.client import Client as DjangoClient
from django_any.contrib.auth import any_user
from django.contrib.admin.helpers import AdminForm
from django_any import xunit
def _context_keys_iterator(context):
    # Recursively yield every string key from a (possibly nested) Django
    # template context structure.  Python 2 only: relies on ``basestring``.
    for container_or_key in context:
        if isinstance(container_or_key, basestring):
            yield container_or_key
        else:
            # Not a key — a nested container of keys; recurse into it.
            for key in _context_keys_iterator(container_or_key):
                yield key
def _request_context_forms(context):
    """
    Lookup all stored in context forms instance
    """
    # Scan every context key and yield anything form-like: plain forms,
    # formsets, or the form wrapped inside an admin AdminForm helper.
    for key in _context_keys_iterator(context):
        inst = context[key]
        if isinstance(inst, (forms.Form, forms.ModelForm)):
            yield inst
        elif isinstance(inst, forms.formsets.BaseFormSet):
            yield inst
        elif isinstance(inst, AdminForm):
            # Unwrap the admin helper to get the underlying form.
            yield inst.form
class Client(DjangoClient):
    """Django test client extended with random-data helpers."""

    def login_as(self, **kwargs):
        """Log in as the given ``user`` (or an auto-generated one).

        A random password is set on the user so the test client can
        authenticate with it.  Returns the logged-in user, or raises
        ``AssertionError`` if the user is unusable or login fails.
        """
        password = xunit.any_string()
        if 'user' in kwargs:
            user = kwargs['user']
            try:
                user.set_password(password)
                user.save()
            except Exception:
                raise AssertionError('Bad user object')
        else:
            # No user supplied — create one with the generated password.
            user = any_user(password=password, **kwargs)

        if self.login(username=user.username, password=password):
            return user
        raise AssertionError('Can''t login with autogenerated user')

    def post_any_data(self, url, extra=None, context_forms=_request_context_forms, **kwargs):
        """GET ``url``, auto-fill every form found in its context with
        random data, and POST the result back.

        ``context_forms`` may be a callable extracting forms from the
        response context, or a list/tuple of context variable names.
        ``extra`` is merged into the generated POST data last.
        """
        response = self.get(url)
        post_data = {}

        # extract forms instances
        if callable(context_forms):
            forms_list = context_forms(response.context)
        elif isinstance(context_forms, (list, tuple)):
            forms_list = [response.context[form_name] for form_name in context_forms]
        else:
            raise TypeError('context_forms should be callable or list or tuple, not %s' % type(context_forms).__name__)

        # generate data
        for form in forms_list:
            if isinstance(form, forms.formsets.BaseFormSet):  # TODO any_form ExtensionMethod
                # TODO support formset data; for now only the management
                # form is submitted, with no extra forms.
                form_data = form.management_form.initial
                form_data['MAX_NUM_FORMS'] = 0
            else:
                form_data, form_files = any_form(form.__class__, **kwargs)  # TODO support form instance

            # NOTE(review): prefix handling is applied to every form here —
            # confirm this matches the original (flattened) indentation.
            if form.prefix:
                form_data = dict([('%s-%s' % (form.prefix, key), value) for key, value in form_data.items()])
            post_data.update(form_data)

        if extra:
            post_data.update(extra)

        return self.post(url, post_data)
def without_random_seed(func):
    """
    Decorator marking a test method that must not be run with a random seed.
    """
    setattr(func, '__django_any_without_random_seed', True)
    return func
def with_seed(seed):
    """
    Decorator factory marking a test method to also be run with the given
    specific seed.  Multiple decorations accumulate seeds in a list.
    """
    def _decorator(func):
        seed_list = list(getattr(func, '__django_any_with_seed', []))
        seed_list.append(seed)
        func.__django_any_with_seed = seed_list
        return func
    return _decorator
def set_seed(func, seed=None):
    """
    Wrap *func* so the random module is seeded before it runs.

    When no (truthy) seed is given, the current timestamp in milliseconds
    is used.  The chosen seed is stored on the test instance so it can be
    reported later.
    """
    def _seeded(self, seed=seed, *args, **kwargs):
        if not seed:
            seed = int(time.time() * 1000)
        self.__django_any_seed = seed
        random.seed(seed)
        return func(self, *args, **kwargs)
    return _seeded
class WithTestDataSeed(type):
    """
    Metaclass for TestCases, manages random tests run
    """
    def __new__(cls, cls_name, bases, attrs):
        # Seed slot reported by shortDescription; overwritten per-run by
        # the set_seed wrapper (no name mangling: assigned as a dict key).
        attrs['__django_any_seed'] = 0

        def shortDescription(self):
            # Include the active seed so failing runs can be reproduced.
            return "%s (%s) With seed %s" % (self._testMethodName, _strclass(self.__class__), getattr(self, '__django_any_seed'))

        # NOTE(review): Python 2 only — mutating ``attrs`` while iterating
        # ``attrs.items()`` raises RuntimeError on Python 3 dict views.
        for name, func in attrs.items():
            if name.startswith('test') and hasattr(func, '__call__'):
                if getattr(func, '__django_any_without_random_seed', False):
                    # Opted out via @without_random_seed: drop the method.
                    del attrs[name]
                else:
                    # Wrap with a timestamp seed, plus one extra test per
                    # seed requested via @with_seed.
                    attrs[name] = set_seed(func)
                    for seed in getattr(func, '__django_any_with_seed', []):
                        attrs['%s_%d' % (name, seed)] = set_seed(func, seed)

        testcase = super(WithTestDataSeed, cls).__new__(cls, cls_name, bases, attrs)
        testcase.shortDescription = shortDescription
        return testcase
8f303b0b13200089756242ef79f73eb29122007d | 878 | py | Python | python/setup.py | bowlofstew/planout | 175f3833c00df7c0a4606396797cbb4220ea6cb6 | [
"BSD-3-Clause"
] | null | null | null | python/setup.py | bowlofstew/planout | 175f3833c00df7c0a4606396797cbb4220ea6cb6 | [
"BSD-3-Clause"
] | null | null | null | python/setup.py | bowlofstew/planout | 175f3833c00df7c0a4606396797cbb4220ea6cb6 | [
"BSD-3-Clause"
] | null | null | null | from distutils.core import setup
setup(
name='PlanOut',
version='0.5',
author='Facebook, Inc.',
author_email='eytan@fb.com',
packages=[
'planout',
'planout.ops',
'planout.test'
],
url='http://pypi.python.org/pypi/PlanOut/',
license='LICENSE',
description='PlanOut is a framework for online field experimentation.',
keywords=['experimentation', 'A/B testing'],
long_description="""PlanOut is a framework for online field experimentation.
PlanOut makes it easy to design both simple A/B tests and more complex
experiments, including multi-factorial designs and within-subjects designs.
It also includes advanced features, including built-in logging, experiment
management, and serialization of experiments via a domain-specific language.
""",
)
# long_description=open('README.md').read(),
| 33.769231 | 80 | 0.694761 |
381ea4da8e57d809bd0d1f796775e9de4bebc6a3 | 1,169 | py | Python | File_Transfer_Protocol/ftp_send_receive.py | srivama/Python | 809d4c077c179feb077f09a3cd2501f9724366a2 | [
"MIT"
] | null | null | null | File_Transfer_Protocol/ftp_send_receive.py | srivama/Python | 809d4c077c179feb077f09a3cd2501f9724366a2 | [
"MIT"
] | null | null | null | File_Transfer_Protocol/ftp_send_receive.py | srivama/Python | 809d4c077c179feb077f09a3cd2501f9724366a2 | [
"MIT"
] | null | null | null | """
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root user name & password for security reasons
Create a separate user and provide access to a home directory of the user
Use the login id and password of the user created. Here cwd stands for current working directory
"""
from ftplib import FTP
# Connect and authenticate, then change into the remote working directory
# used by both transfer helpers below.
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""The file which will be received via the FTP server
Enter the location of the file where the file is received"""
def ReceiveFile():
FileName = 'example.txt' """ Enter the location of the file """
LocalFile = open(FileName, 'wb')
ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""The file which will be sent via the FTP server
The file send will be send to the current working directory"""
def SendFile():
FileName = 'example.txt' """ Enter the name of the file """
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
| 33.4 | 92 | 0.711719 |
7fc9168ce6f9fbe70d95c79ca8b3c202d82c877e | 1,900 | py | Python | lib/cxxtest/python/setup.py | aproeme/libgeodecomp | f78899c67ad62540fd153cba132a0a363a7b3fa9 | [
"BSL-1.0"
] | 493 | 2016-07-11T13:35:24.000Z | 2022-02-15T13:04:29.000Z | lib/cxxtest/python/setup.py | aproeme/libgeodecomp | f78899c67ad62540fd153cba132a0a363a7b3fa9 | [
"BSL-1.0"
] | 72 | 2015-02-05T10:41:30.000Z | 2022-03-03T12:02:47.000Z | lib/cxxtest/python/setup.py | aproeme/libgeodecomp | f78899c67ad62540fd153cba132a0a363a7b3fa9 | [
"BSL-1.0"
] | 229 | 2016-07-12T10:39:54.000Z | 2022-02-15T13:04:31.000Z | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
"""
Script to generate the installer for cxxtest.
"""
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: End Users/Desktop
License :: OSI Approved :: LGPL License
Natural Language :: English
Operating System :: Microsoft :: Windows
Operating System :: Unix
Programming Language :: Python
Topic :: Software Development :: Libraries :: Python Modules
"""
import os
import sys
from os.path import realpath, dirname
if sys.version_info >= (3,0):
sys.path.insert(0, dirname(realpath(__file__))+os.sep+'python3')
os.chdir('python3')
import cxxtest
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
doclines = cxxtest.__doc__.split("\n")
setup(name="cxxtest",
version=cxxtest.__version__,
maintainer=cxxtest.__maintainer__,
maintainer_email=cxxtest.__maintainer_email__,
url = cxxtest.__url__,
license = cxxtest.__license__,
platforms = ["any"],
description = doclines[0],
classifiers = filter(None, classifiers.split("\n")),
long_description = "\n".join(doclines[2:]),
packages=['cxxtest'],
keywords=['utility'],
scripts=['scripts/cxxtestgen']
#
# The entry_points option is not supported by distutils.core
#
#entry_points="""
#[console_scripts]
#cxxtestgen = cxxtest.cxxtestgen:main
#"""
)
| 30.15873 | 74 | 0.641579 |
6045ec6e264d342642d3d4a0c87fe2bf712ad52d | 4,067 | py | Python | fairseq/bleu.py | gcunhase/fairseq | dfc13956a3f6c40b8de44d7b8b0959f62c8db025 | [
"BSD-3-Clause"
] | null | null | null | fairseq/bleu.py | gcunhase/fairseq | dfc13956a3f6c40b8de44d7b8b0959f62c8db025 | [
"BSD-3-Clause"
] | null | null | null | fairseq/bleu.py | gcunhase/fairseq | dfc13956a3f6c40b8de44d7b8b0959f62c8db025 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import ctypes
import math
import torch
# The BLEU statistics are accumulated by a compiled C extension; fail
# loudly with a build hint if it has not been built yet.
try:
    from fairseq import libbleu
except ImportError as e:
    import sys
    sys.stderr.write('ERROR: missing libbleu.so. run `python setup.py build develop`\n')
    raise e

# Handle to the shared library; called through ctypes below.
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
    """ctypes mirror of libbleu's statistics struct.

    Field order and types must match the C definition exactly: reference
    and prediction lengths, then matched/total n-gram counts for orders
    1 through 4.
    """
    _fields_ = [
        ('reflen', ctypes.c_size_t),
        ('predlen', ctypes.c_size_t),
        ('match1', ctypes.c_size_t),
        ('count1', ctypes.c_size_t),
        ('match2', ctypes.c_size_t),
        ('count2', ctypes.c_size_t),
        ('match3', ctypes.c_size_t),
        ('count3', ctypes.c_size_t),
        ('match4', ctypes.c_size_t),
        ('count4', ctypes.c_size_t),
    ]
class SacrebleuScorer(object):
    """String-based BLEU scorer backed by the ``sacrebleu`` package."""

    def __init__(self):
        # Local import: sacrebleu is only required when this scorer is used.
        import sacrebleu
        self.sacrebleu = sacrebleu
        self.reset()

    def reset(self, one_init=False):
        # ``one_init`` (add-one smoothing) is not supported by this backend.
        if one_init:
            raise NotImplementedError
        self.ref = []
        self.sys = []

    def add_string(self, ref, pred):
        # Accumulate one (reference, hypothesis) sentence pair.
        self.ref.append(ref)
        self.sys.append(pred)

    def score(self, order=4):
        # BLEU score (0-100) over everything added so far.
        return self.result_string(order).bleu

    def result_string(self, order=4):
        # sacrebleu only computes the standard 4-gram BLEU.
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_bleu(self.sys, [self.ref])
class Scorer(object):
    """Tensor-based BLEU scorer backed by the C ``libbleu`` extension.

    ``pad``/``eos``/``unk`` are the special token indices of the
    dictionary used to encode references and hypotheses.
    """

    def __init__(self, pad, eos, unk):
        self.stat = BleuStat()
        self.pad = pad
        self.eos = eos
        self.unk = unk
        self.reset()

    def reset(self, one_init=False):
        # ``one_init`` seeds every count with 1 (add-one smoothing in C).
        if one_init:
            C.bleu_one_init(ctypes.byref(self.stat))
        else:
            C.bleu_zero_init(ctypes.byref(self.stat))

    def add(self, ref, pred):
        """Accumulate n-gram statistics for one (reference, hypothesis)
        pair of CPU int tensors."""
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor(got {})'
                            .format(type(pred)))

        # don't match unknown words: remap <unk> in the reference to a
        # sentinel no hypothesis token can equal.
        rref = ref.clone()
        assert not rref.lt(0).any()
        rref[rref.eq(self.unk)] = -999

        # The C code reads flat, contiguous buffers via data_ptr().
        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)

        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos))

    def score(self, order=4):
        # BLEU = brevity penalty * geometric mean of n-gram precisions,
        # scaled to 0-100.  A zero precision yields -inf in the log sum
        # and therefore a score of 0.
        psum = sum(math.log(p) if p > 0 else float('-Inf')
                   for p in self.precision()[:order])
        return self.brevity() * math.exp(psum / order) * 100

    def precision(self):
        # Per-order n-gram precisions (match/count), 0 when count is 0.
        def ratio(a, b):
            return a / b if b > 0 else 0

        return [
            ratio(self.stat.match1, self.stat.count1),
            ratio(self.stat.match2, self.stat.count2),
            ratio(self.stat.match3, self.stat.count3),
            ratio(self.stat.match4, self.stat.count4),
        ]

    def brevity(self):
        # Brevity penalty: exp(1 - reflen/predlen), capped at 1.
        r = self.stat.reflen / self.stat.predlen
        return min(1, math.exp(1 - r))

    def result_string(self, order=4):
        """Human-readable summary line in the conventional BLEU format."""
        assert order <= 4, "BLEU scores for order > 4 aren't supported"
        fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
        for _ in range(1, order):
            fmt += '/{:2.1f}'
        fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
        bleup = [p * 100 for p in self.precision()[:order]]
        return fmt.format(order, self.score(order=order), *bleup,
                          self.brevity(), self.stat.predlen/self.stat.reflen,
                          self.stat.predlen, self.stat.reflen)
| 30.810606 | 88 | 0.57192 |
2769de9c0d8568b13dc0e82f94d528a9cef1c2a8 | 7,724 | py | Python | ucsmsdk/mometa/fabric/FabricFcVsanPc.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/fabric/FabricFcVsanPc.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/fabric/FabricFcVsanPc.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for FabricFcVsanPc ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FabricFcVsanPcConsts():
    # Auto-generated constants for the allowed values of FabricFcVsanPc
    # properties, grouped by property: adminState, ifRole, ifType,
    # operState and switchId.
    ADMIN_STATE_DISABLED = "disabled"
    ADMIN_STATE_ENABLED = "enabled"
    IF_ROLE_DIAG = "diag"
    IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
    IF_ROLE_FCOE_STORAGE = "fcoe-storage"
    IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
    IF_ROLE_MGMT = "mgmt"
    IF_ROLE_MONITOR = "monitor"
    IF_ROLE_NAS_STORAGE = "nas-storage"
    IF_ROLE_NETWORK = "network"
    IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
    IF_ROLE_SERVER = "server"
    IF_ROLE_SERVICE = "service"
    IF_ROLE_STORAGE = "storage"
    IF_ROLE_UNKNOWN = "unknown"
    IF_TYPE_AGGREGATION = "aggregation"
    IF_TYPE_PHYSICAL = "physical"
    IF_TYPE_UNKNOWN = "unknown"
    IF_TYPE_VIRTUAL = "virtual"
    OPER_STATE_ADMIN_DOWN = "admin-down"
    OPER_STATE_DOWN = "down"
    OPER_STATE_ERROR_DISABLED = "error-disabled"
    OPER_STATE_FAILED = "failed"
    OPER_STATE_HARDWARE_FAILURE = "hardware-failure"
    OPER_STATE_INDETERMINATE = "indeterminate"
    OPER_STATE_LINK_DOWN = "link-down"
    OPER_STATE_LINK_UP = "link-up"
    OPER_STATE_NO_LICENSE = "no-license"
    OPER_STATE_SFP_NOT_PRESENT = "sfp-not-present"
    OPER_STATE_SOFTWARE_FAILURE = "software-failure"
    OPER_STATE_UDLD_AGGR_DOWN = "udld-aggr-down"
    OPER_STATE_UP = "up"
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
class FabricFcVsanPc(ManagedObject):
    """This is FabricFcVsanPc class."""

    # Allowed-value constants for this managed object's properties.
    consts = FabricFcVsanPcConsts()
    # Properties that form the object's relative name (rn).
    naming_props = set([u'switchId', u'portId'])

    # Auto-generated object metadata: class id, xml tag, rn pattern,
    # introduction version, access, mask, parents/children, privileges
    # and supported verbs.
    mo_meta = MoMeta("FabricFcVsanPc", "fabricFcVsanPc", "pc-switch-[switch_id]-pc-[port_id]", VersionMeta.Version141i, "InputOutput", 0x3ff, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricVsan'], [u'faultInst'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata (auto-generated; do not edit by hand).
    prop_meta = {
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disabled", "enabled"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
        "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
        "locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["admin-down", "down", "error-disabled", "failed", "hardware-failure", "indeterminate", "link-down", "link-up", "no-license", "sfp-not-present", "software-failure", "udld-aggr-down", "up"], []),
        "peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "port_id": MoPropertyMeta("port_id", "portId", "uint", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x40, None, None, None, [], ["1-256"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "state_qual": MoPropertyMeta("state_qual", "stateQual", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x200, None, None, None, ["A", "B", "NONE"], []),
        "transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
        "warnings": MoPropertyMeta("warnings", "warnings", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|none|fc-zoning-enabled|configuration-error),){0,3}(defaultValue|none|fc-zoning-enabled|configuration-error){0,1}""", [], []),
    }

    # Mapping from XML attribute names to Python property names.
    prop_map = {
        "adminState": "admin_state",
        "childAction": "child_action",
        "descr": "descr",
        "dn": "dn",
        "epDn": "ep_dn",
        "fltAggr": "flt_aggr",
        "ifRole": "if_role",
        "ifType": "if_type",
        "locale": "locale",
        "name": "name",
        "operState": "oper_state",
        "peerDn": "peer_dn",
        "portId": "port_id",
        "rn": "rn",
        "sacl": "sacl",
        "stateQual": "state_qual",
        "status": "status",
        "switchId": "switch_id",
        "transport": "transport",
        "type": "type",
        "warnings": "warnings",
    }

    def __init__(self, parent_mo_or_dn, switch_id, port_id, **kwargs):
        # ``switch_id`` and ``port_id`` are naming properties and must be
        # supplied at construction time; all other properties default to
        # None and may be set via ``kwargs``.
        self._dirty_mask = 0
        self.switch_id = switch_id
        self.port_id = port_id
        self.admin_state = None
        self.child_action = None
        self.descr = None
        self.ep_dn = None
        self.flt_aggr = None
        self.if_role = None
        self.if_type = None
        self.locale = None
        self.name = None
        self.oper_state = None
        self.peer_dn = None
        self.sacl = None
        self.state_qual = None
        self.status = None
        self.transport = None
        self.type = None
        self.warnings = None

        ManagedObject.__init__(self, "FabricFcVsanPc", parent_mo_or_dn, **kwargs)
| 60.818898 | 344 | 0.651346 |
fcb4db0b630a486bbf51833af1f4f05fa66f2aa7 | 440 | py | Python | advocate/__init__.py | ColdHeat/Advocate | 6d699aed899784dfae5fac28e29567936bed81a3 | [
"Apache-2.0"
] | 76 | 2015-12-05T02:38:13.000Z | 2022-02-02T07:34:57.000Z | advocate/__init__.py | ColdHeat/Advocate | 6d699aed899784dfae5fac28e29567936bed81a3 | [
"Apache-2.0"
] | 16 | 2015-10-29T20:28:45.000Z | 2021-10-11T04:43:38.000Z | advocate/__init__.py | ColdHeat/Advocate | 6d699aed899784dfae5fac28e29567936bed81a3 | [
"Apache-2.0"
] | 9 | 2017-01-28T01:14:09.000Z | 2022-01-17T00:20:31.000Z | __version__ = "1.0.0"
from requests import utils
from requests.models import Request, Response, PreparedRequest
from requests.status_codes import codes
from requests.exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
from .adapters import ValidatingHTTPAdapter
from .api import *
from .addrvalidator import AddrValidator
from .exceptions import UnacceptableAddressException
| 29.333333 | 62 | 0.827273 |
be487f768bf77631352158b2bb1237f4c8e07c1d | 1,795 | py | Python | shared/modules/verify_cce_module.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | shared/modules/verify_cce_module.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | shared/modules/verify_cce_module.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
import sys
import platform
from lxml import etree
# This script checks the validity of assigned CCEs, lists granted and remaining
# available CCEs, and checks for duplicates.
release = '%.0f' % float(platform.linux_distribution()[1])
xccdf_ns = "http://checklists.nist.gov/xccdf/1.1"
tree = etree.parse('../output/unlinked-rhel' + str(release) + '-xccdf.xml')
cces_assigned = tree.findall("//{%s}ident[@system='http://cce.mitre.org']"
% xccdf_ns)
assigned_ids = []
granted_ids = []
# print the list of assigned CCEs
print "Assigned CCEs:"
for item in cces_assigned:
print item.text
assigned_ids.append(item.text)
print "-------------"
# check for duplicates in the assigned CCE list
dup_assigned_ids = [item for item in cces_assigned if cces_assigned.count(item) > 1]
for item in dup_assigned_ids:
print "Duplicate assignment of CCE: %s" % item
# open the available CCE file
with open('../references/cce-rhel' + int(release) + '-avail.txt', 'r') as txt_file:
for line in txt_file:
granted_ids = [line.rstrip('\n') for line in txt_file]
# print CCEs that are available (i.e. in granted but not assigned)
for item in granted_ids:
if item not in assigned_ids:
print "Available CCE: %s" % item
for rule in tree.findall("//{%s}Rule" % xccdf_ns):
# print "rule is " + rule.get("id")
items = rule.findall("{%s}ident[@system='http://cce.mitre.org']" % xccdf_ns)
if len(items) > 1:
print "Rule with multiple CCEs assigned: %s" % rule.get("id")
if len(items) == 0:
print "Rule without CCE: %s" % rule.get("id")
for item in items:
if item.text not in granted_ids:
print "Invalid CCE: %s in %s" % (item.text, rule.get("id"))
sys.exit()
| 33.867925 | 84 | 0.651253 |
5c59b6fb3431f6ac626e34d2ac6b1bfd96c606ea | 45,314 | py | Python | zerver/tornado/event_queue.py | measo3/2018-2-OSS-L5 | 15af7b91489b6cab794c5bd5af5948b3cc059f85 | [
"Apache-2.0"
] | 3 | 2018-12-04T01:44:43.000Z | 2019-05-13T06:16:21.000Z | zerver/tornado/event_queue.py | measo3/2018-2-OSS-L5 | 15af7b91489b6cab794c5bd5af5948b3cc059f85 | [
"Apache-2.0"
] | 58 | 2018-11-27T15:18:54.000Z | 2018-12-09T13:43:07.000Z | zerver/tornado/event_queue.py | measo3/2018-2-OSS-L5 | 15af7b91489b6cab794c5bd5af5948b3cc059f85 | [
"Apache-2.0"
] | 4 | 2018-11-29T22:47:27.000Z | 2018-12-04T09:34:22.000Z | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Union
from mypy_extensions import TypedDict
from django.utils.translation import ugettext as _
from django.conf import settings
from collections import deque
import os
import time
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
from zerver.models import UserProfile, Client, Realm
from zerver.decorator import cachify
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_timer_restart
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.sharding import get_tornado_uri, get_tornado_port, \
notify_tornado_queue_name, tornado_return_queue_name
import copy
# Shared HTTP session used by Django to talk to the Tornado shards.
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
    if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
        # This seems like the only working solution to ignore proxy in
        # requests library.
        requests_client.trust_env = False

# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
# How often the periodic garbage-collection callback runs (milliseconds).
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5

# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60

# The heartbeats effectively act as a server-side timeout for
# get_events().  The actual timeout value is randomized for each
# client connection based on the below value.  We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor:
    """State for one registered event queue: the owning user, delivery
    preferences (markdown, narrow, etc.), the EventQueue itself, and —
    while a longpoll request is in flight — the Tornado handler through
    which events are pushed to the client."""

    def __init__(self,
                 user_profile_id: int,
                 user_profile_email: str,
                 realm_id: int, event_queue: 'EventQueue',
                 event_types: Optional[Sequence[str]],
                 client_type_name: str,
                 apply_markdown: bool=True,
                 client_gravatar: bool=True,
                 all_public_streams: bool=False,
                 lifespan_secs: int=0,
                 narrow: Iterable[Sequence[str]]=[]) -> None:
        # These objects are serialized on shutdown and restored on restart.
        # If fields are added or semantics are changed, temporary code must be
        # added to load_event_queues() to update the restored objects.
        # Additionally, the to_dict and from_dict methods must be updated
        self.user_profile_id = user_profile_id
        self.user_profile_email = user_profile_email
        self.realm_id = realm_id
        self.current_handler_id = None  # type: Optional[int]
        self.current_client_name = None  # type: Optional[str]
        self.event_queue = event_queue
        self.queue_timeout = lifespan_secs
        self.event_types = event_types
        self.last_connection_time = time.time()
        self.apply_markdown = apply_markdown
        self.client_gravatar = client_gravatar
        self.all_public_streams = all_public_streams
        self.client_type_name = client_type_name
        self._timeout_handle = None  # type: Any  # TODO: should be return type of ioloop.call_later
        self.narrow = narrow
        self.narrow_filter = build_narrow_filter(narrow)

        # Clamp queue_timeout to between minimum and maximum timeouts
        self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS,
                                 min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the persistent dump written at shutdown."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(user_profile_id=self.user_profile_id,
                    user_profile_email=self.user_profile_email,
                    realm_id=self.realm_id,
                    event_queue=self.event_queue.to_dict(),
                    queue_timeout=self.queue_timeout,
                    event_types=self.event_types,
                    last_connection_time=self.last_connection_time,
                    apply_markdown=self.apply_markdown,
                    client_gravatar=self.client_gravatar,
                    all_public_streams=self.all_public_streams,
                    narrow=self.narrow,
                    client_type_name=self.client_type_name)

    def __repr__(self) -> str:
        return "ClientDescriptor<%s>" % (self.event_queue.id,)

    @classmethod
    def from_dict(cls, d: MutableMapping[str, Any]) -> 'ClientDescriptor':
        """Restore a descriptor dumped by to_dict, applying migrations for
        keys that were added after the dump was written."""
        if 'user_profile_email' not in d:
            # Temporary migration for the addition of the new user_profile_email field
            from zerver.models import get_user_profile_by_id
            d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
        if 'client_type' in d:
            # Temporary migration for the rename of client_type to client_type_name
            d['client_type_name'] = d['client_type']
        if 'client_gravatar' not in d:
            # Temporary migration for the addition of the client_gravatar field
            d['client_gravatar'] = False
        ret = cls(
            d['user_profile_id'],
            d['user_profile_email'],
            d['realm_id'],
            EventQueue.from_dict(d['event_queue']),
            d['event_types'],
            d['client_type_name'],
            d['apply_markdown'],
            d['client_gravatar'],
            d['all_public_streams'],
            d['queue_timeout'],
            d.get('narrow', [])
        )
        ret.last_connection_time = d['last_connection_time']
        return ret

    def prepare_for_pickling(self) -> None:
        # Handler ids and ioloop timeouts are process-local state; drop
        # them before serialization.
        self.current_handler_id = None
        self._timeout_handle = None

    def add_event(self, event: Dict[str, Any]) -> None:
        """Queue an event and, if a longpoll request is waiting, answer it."""
        if self.current_handler_id is not None:
            handler = get_handler_by_id(self.current_handler_id)
            async_request_timer_restart(handler._request)

        self.event_queue.push(event)
        self.finish_current_handler()

    def finish_current_handler(self) -> bool:
        """Respond to the pending longpoll request, if any.

        Returns True if a handler was connected (and has now been finished)."""
        if self.current_handler_id is not None:
            err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
            try:
                finish_handler(self.current_handler_id, self.event_queue.id,
                               self.event_queue.contents(), self.apply_markdown)
            except Exception:
                logging.exception(err_msg)
            finally:
                self.disconnect_handler()
            return True
        return False

    def accepts_event(self, event: Mapping[str, Any]) -> bool:
        """Whether this queue is interested in the given event, honoring the
        registered event_types filter and (for messages) the narrow."""
        if self.event_types is not None and event["type"] not in self.event_types:
            return False
        if event["type"] == "message":
            return self.narrow_filter(event)
        return True

    # TODO: Refactor so we don't need this function
    def accepts_messages(self) -> bool:
        return self.event_types is None or "message" in self.event_types

    def idle(self, now: float) -> bool:
        """True when no request is connected and the queue has outlived its
        timeout; such queues are garbage-collected."""
        if not hasattr(self, 'queue_timeout'):
            # Migration for descriptors restored from pre-queue_timeout dumps.
            self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS

        return (self.current_handler_id is None and
                now - self.last_connection_time >= self.queue_timeout)

    def connect_handler(self, handler_id: int, client_name: str) -> None:
        """Attach a waiting longpoll handler and schedule its heartbeat."""
        self.current_handler_id = handler_id
        self.current_client_name = client_name
        set_descriptor_by_handler_id(handler_id, self)

        self.last_connection_time = time.time()

        def timeout_callback() -> None:
            self._timeout_handle = None
            # All clients get heartbeat events
            self.add_event(dict(type='heartbeat'))
        ioloop = tornado.ioloop.IOLoop.instance()
        # Randomized so heartbeats for many clients don't fire in lockstep.
        interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
        if self.client_type_name != 'API: heartbeat test':
            self._timeout_handle = ioloop.call_later(interval, timeout_callback)

    def disconnect_handler(self, client_closed: bool=False) -> None:
        """Detach the current handler (if any) and cancel its heartbeat."""
        if self.current_handler_id:
            clear_descriptor_by_handler_id(self.current_handler_id, None)
            clear_handler_by_id(self.current_handler_id)
            if client_closed:
                logging.info("Client disconnected for queue %s (%s via %s)" %
                             (self.event_queue.id, self.user_profile_email,
                              self.current_client_name))
        self.current_handler_id = None
        self.current_client_name = None
        if self._timeout_handle is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    def cleanup(self) -> None:
        # Before we can GC the event queue, we need to disconnect the
        # handler and notify the client (or connection server) so that
        # they can cleanup their own state related to the GC'd event
        # queue.  Finishing the handler before we GC ensures the
        # invariant that event queues are idle when passed to
        # `do_gc_event_queues` is preserved.
        self.finish_current_handler()
        do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
                           {self.realm_id})
def compute_full_event_type(event: Mapping[str, Any]) -> str:
    """Return the fine-grained event-type key used for virtual-event
    coalescing in EventQueue.push."""
    event_type = event["type"]
    if event_type != "update_message_flags":
        return event_type
    if event["all"]:
        # The "all messages" flag change gets its own category
        return "all_flags/%s/%s" % (event["flag"], event["operation"])
    return "flags/%s/%s" % (event["operation"], event["flag"])
class EventQueue:
    """An ordered queue of events for one client, with "virtual event"
    coalescing: consecutive pointer/restart/flag-change events are merged
    into a single pending event so an idle client's queue does not grow
    unboundedly with redundant updates."""

    def __init__(self, id: str) -> None:
        self.queue = deque()  # type: ignore # Should be Deque[Dict[str, Any]], but Deque isn't available in Python 3.4
        self.next_event_id = 0  # type: int
        self.id = id  # type: str
        # Maps full event type (see compute_full_event_type) to the single
        # coalesced event of that type currently pending.
        self.virtual_events = {}  # type: Dict[str, Dict[str, Any]]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the persistent dump written at shutdown."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(id=self.id,
                    next_event_id=self.next_event_id,
                    queue=list(self.queue),
                    virtual_events=self.virtual_events)

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> 'EventQueue':
        """Restore a queue dumped by to_dict (virtual_events may be absent
        in dumps from older versions)."""
        ret = cls(d['id'])
        ret.next_event_id = d['next_event_id']
        ret.queue = deque(d['queue'])
        ret.virtual_events = d.get("virtual_events", {})
        return ret

    def push(self, event: Dict[str, Any]) -> None:
        """Append an event, assigning it the next id; pointer/restart/flag
        events are coalesced into virtual_events instead of the queue."""
        event['id'] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if (full_event_type in ["pointer", "restart"] or
                full_event_type.startswith("flags/")):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "pointer":
                virtual_event["pointer"] = event["pointer"]
            elif full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)

    # Note that pop ignores virtual events.  This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self) -> Dict[str, Any]:
        return self.queue.popleft()

    def empty(self) -> bool:
        return len(self.queue) == 0 and len(self.virtual_events) == 0

    # See the comment on pop; that applies here as well
    def prune(self, through_id: int) -> None:
        """Discard queued events with id <= through_id (already delivered)."""
        while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
            self.pop()

    def contents(self) -> List[Dict[str, Any]]:
        """Return all pending events in id order, resolving virtual events
        into real ones (and clearing virtual_events)."""
        contents = []  # type: List[Dict[str, Any]]
        virtual_id_map = {}  # type: Dict[int, Dict[str, Any]]
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(list(virtual_id_map.keys()))

        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1

        self.virtual_events = {}
        self.queue = deque(contents)
        return contents
# maps queue ids to client descriptors
clients = {}  # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {}  # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}  # type: Dict[int, List[ClientDescriptor]]

# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted
gc_hooks = []  # type: List[Callable[[int, ClientDescriptor, bool], None]]

# Monotonic counter combined with the server generation to mint queue ids.
next_queue_id = 0
def clear_client_event_queues_for_testing() -> None:
    """Reset all in-memory event-queue state; only valid in the test suite."""
    assert settings.TEST_SUITE
    global next_queue_id
    for registry in (clients, user_clients, realm_clients_all_streams, gc_hooks):
        registry.clear()
    next_queue_id = 0
def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
    """Register a callback run for each queue removed during GC; the bool
    argument is True when it was the last queue for that user."""
    gc_hooks.append(hook)
def get_client_descriptor(queue_id: str) -> Optional[ClientDescriptor]:
    """Look up a registered client descriptor by queue id.

    Returns None when the queue does not exist (e.g. it was garbage
    collected); callers such as fetch_events check for that explicitly.
    The previous return annotation claimed a non-optional
    ClientDescriptor, which dict.get cannot guarantee.
    """
    return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
    """All event-queue clients registered for the given user (possibly [])."""
    try:
        return user_clients[user_profile_id]
    except KeyError:
        return []
def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
    """Clients in the realm registered for all public streams (possibly [])."""
    if realm_id in realm_clients_all_streams:
        return realm_clients_all_streams[realm_id]
    return []
def add_to_client_dicts(client: ClientDescriptor) -> None:
    """Index a client under its user, and additionally under its realm when
    it listens to all public streams or registered a narrow."""
    per_user = user_clients.setdefault(client.user_profile_id, [])
    per_user.append(client)

    if client.narrow != [] or client.all_public_streams:
        per_realm = realm_clients_all_streams.setdefault(client.realm_id, [])
        per_realm.append(client)
def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
    """Create and register a new event queue.

    Queue ids embed the server generation so ids from before a restart
    can never collide with fresh ones.  Mutates new_queue_data by
    inserting the serialized empty queue."""
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client
def do_gc_event_queues(to_remove: AbstractSet[str], affected_users: AbstractSet[int],
affected_realms: AbstractSet[int]) -> None:
def filter_client_dict(client_dict: MutableMapping[int, List[ClientDescriptor]], key: int) -> None:
if key not in client_dict:
return
new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues(port: int) -> None:
    """Periodic task: garbage-collect event queues idle past their timeout."""
    start = time.time()
    to_remove = set()  # type: Set[str]
    affected_users = set()  # type: Set[int]
    affected_realms = set()  # type: Set[int]
    for (id, client) in clients.items():
        if client.idle(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)

    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle and thus
    # not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)

    if settings.PRODUCTION:
        logging.info(('Tornado %d removed %d idle event queues owned by %d users in %.3fs.' +
                      ' Now %d active queues, %s')
                     % (port, len(to_remove), len(affected_users), time.time() - start,
                        len(clients), handler_stats_string()))
    statsd.gauge('tornado.active_queues', len(clients))
    statsd.gauge('tornado.active_users', len(user_clients))
def persistent_queue_filename(port: int, last: bool=False) -> str:
    """Path where this Tornado process dumps/loads its event queues.

    Single-process deployments keep the historical, port-less filenames;
    sharded deployments embed the port.  last=True names the previous
    generation's dump (see setup_event_queue's rename)."""
    single_process = settings.TORNADO_PROCESSES == 1
    if last:
        if single_process:
            return "/var/tmp/event_queues.json.last"
        return "/var/tmp/event_queues.%d.last.json" % (port,)
    suffix = '' if single_process else '.' + str(port)
    return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % (suffix,)
def dump_event_queues(port: int) -> None:
    """Persist all live queues to disk (run on shutdown/reload) so that
    load_event_queues can restore them after a restart."""
    start = time.time()

    with open(persistent_queue_filename(port), "w") as stored_queues:
        ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()],
                   stored_queues)

    logging.info('Tornado %d dumped %d event queues in %.3fs'
                 % (port, len(clients), time.time() - start))
def load_event_queues(port: int) -> None:
    """Restore queues written by dump_event_queues, tolerating a missing
    or corrupt dump file (we start with empty state in that case)."""
    global clients
    start = time.time()

    # ujson chokes on bad input pretty easily.  We separate out the actual
    # file reading from the loading so that we don't silently fail if we get
    # bad input.
    try:
        with open(persistent_queue_filename(port), "r") as stored_queues:
            json_data = stored_queues.read()
        try:
            clients = dict((qid, ClientDescriptor.from_dict(client))
                           for (qid, client) in ujson.loads(json_data))
        except Exception:
            logging.exception("Tornado %d could not deserialize event queues" % (port,))
    except (IOError, EOFError):
        pass

    for client in clients.values():
        # Put code for migrations due to event queue data format changes here
        add_to_client_dicts(client)

    logging.info('Tornado %d loaded %d event queues in %.3fs'
                 % (port, len(clients), time.time() - start))
def send_restart_events(immediate: bool=False) -> None:
    """Notify every interested client that the server restarted with a new
    generation; immediate=True asks clients to reload right away."""
    event = dict(type='restart', server_generation=settings.SERVER_GENERATION)  # type: Dict[str, Any]
    if immediate:
        event['immediate'] = True
    for client in clients.values():
        if not client.accepts_event(event):
            continue
        client.add_event(event.copy())
def setup_event_queue(port: int) -> None:
    """Process startup: restore persisted queues, arrange for them to be
    re-persisted on every exit path, schedule periodic GC, and broadcast
    a restart event to connected clients."""
    if not settings.TEST_SUITE:
        load_event_queues(port)
        atexit.register(dump_event_queues, port)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        tornado.autoreload.add_reload_hook(lambda: dump_event_queues(port))

    try:
        # Archive the dump we just loaded so a crash before the next dump
        # cannot re-load stale data.
        os.rename(persistent_queue_filename(port), persistent_queue_filename(port, last=True))
    except OSError:
        pass

    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(lambda: gc_event_queues(port),
                                         EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
    pc.start()

    send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
    """Tornado core of GET /events.

    Returns a dict whose "type" is one of:
    * "response" — reply now (dont_block, a fresh queue, or pending events),
    * "error"    — a JsonableError to report to the client,
    * "async"    — the handler stays registered and will be finished when
      an event arrives (see ClientDescriptor.connect_handler)."""
    queue_id = query["queue_id"]  # type: str
    dont_block = query["dont_block"]  # type: bool
    last_event_id = query["last_event_id"]  # type: int
    user_profile_id = query["user_profile_id"]  # type: int
    new_queue_data = query.get("new_queue_data")  # type: Optional[MutableMapping[str, Any]]
    user_profile_email = query["user_profile_email"]  # type: str
    client_type_name = query["client_type_name"]  # type: str
    handler_id = query["handler_id"]  # type: int

    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            if dont_block:
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if client is None:
                raise BadEventQueueIdError(queue_id)
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            # Events up to last_event_id were acknowledged; drop them.
            client.event_queue.prune(last_event_id)
            was_connected = client.finish_current_handler()

        if not client.event_queue.empty() or dont_block:
            response = dict(events=client.event_queue.contents(),
                            handler_id=handler_id)  # type: Dict[str, Any]
            if orig_queue_id is None:
                response['queue_id'] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
                                                 response["events"][0]["type"])
            else:
                extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)

        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
                                                                        client_type_name))
    except JsonableError as e:
        return dict(type="error", exception=e)

    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
# The following functions are called from Django

# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)

def extract_json_response(resp: requests.Response) -> Dict[str, Any]:
    """Return the decoded JSON body, working across requests versions."""
    if not requests_json_is_function:
        return resp.json  # type: ignore # mypy trusts the stub, not the runtime type checking of this fn
    return resp.json()
def request_event_queue(user_profile: UserProfile, user_client: Client, apply_markdown: bool,
                        client_gravatar: bool, queue_lifespan_secs: int,
                        event_types: Optional[Iterable[str]]=None,
                        all_public_streams: bool=False,
                        narrow: Iterable[Sequence[str]]=[]) -> Optional[str]:
    """Ask the user's Tornado shard to allocate a new event queue (called
    from Django).  Returns the new queue id, or None when no Tornado
    server is configured (e.g. in some test setups)."""
    if settings.TORNADO_SERVER:
        tornado_uri = get_tornado_uri(user_profile.realm)
        req = {'dont_block': 'true',
               'apply_markdown': ujson.dumps(apply_markdown),
               'client_gravatar': ujson.dumps(client_gravatar),
               'all_public_streams': ujson.dumps(all_public_streams),
               'client': 'internal',
               'user_profile_id': user_profile.id,
               'user_client': user_client.name,
               'narrow': ujson.dumps(narrow),
               'secret': settings.SHARED_SECRET,
               'lifespan_secs': queue_lifespan_secs}
        if event_types is not None:
            req['event_types'] = ujson.dumps(event_types)

        try:
            resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
                                        data=req)
        except requests.adapters.ConnectionError:
            logging.error('Tornado server does not seem to be running, check %s '
                          'and %s for more information.' %
                          (settings.ERROR_FILE_LOG_PATH, "tornado.log"))
            raise requests.adapters.ConnectionError(
                "Django cannot connect to Tornado server (%s); try restarting" %
                (tornado_uri,))

        resp.raise_for_status()

        return extract_json_response(resp)['queue_id']
    return None
def get_user_events(user_profile: UserProfile, queue_id: str, last_event_id: int) -> List[Dict[Any, Any]]:
    """Fetch pending events from the user's Tornado shard (called from
    Django).  Returns [] when no Tornado server is configured."""
    if settings.TORNADO_SERVER:
        tornado_uri = get_tornado_uri(user_profile.realm)
        post_data = {
            'queue_id': queue_id,
            'last_event_id': last_event_id,
            'dont_block': 'true',
            'user_profile_id': user_profile.id,
            'secret': settings.SHARED_SECRET,
            'client': 'internal'
        }  # type: Dict[str, Any]
        resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
                                    data=post_data)
        resp.raise_for_status()

        return extract_json_response(resp)['events']
    return []
# Send email notifications to idle users
# after they are idle for 1 hour
# NOTE(review): not referenced elsewhere in this module's visible code —
# presumably consumed by another module; confirm before removing.
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
    """Build the base payload enqueued for the push/email notification
    workers; callers add 'trigger' and 'stream_name' keys."""
    notice = {
        "user_profile_id": user_profile_id,
        "message_id": message_id,
        "timestamp": time.time(),
    }
    return notice
def missedmessage_hook(user_profile_id: int, client: ClientDescriptor, last_for_client: bool) -> None:
    """The receiver_is_off_zulip logic used to determine whether a user
    has no active client suffers from a somewhat fundamental race
    condition.  If the client is no longer on the Internet,
    receiver_is_off_zulip will still return true for
    IDLE_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
    garbage-collected.  This would cause us to reliably miss
    push/email notifying users for messages arriving during the
    IDLE_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
    example).  We address this by, when the queue is garbage-collected
    at the end of those 10 minutes, checking to see if it's the last
    one, and if so, potentially triggering notifications to the user
    at that time, resulting in at most a IDLE_EVENT_QUEUE_TIMEOUT_SECS
    delay in the arrival of their notifications.

    As Zulip's APIs get more popular and the mobile apps start using
    long-lived event queues for perf optimization, future versions of
    this will likely need to replace checking `last_for_client` with
    something more complicated, so that we only consider clients like
    web browsers, not the mobile apps or random API scripts.
    """
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return

    for event in client.event_queue.contents():
        if event['type'] != 'message':
            continue
        assert 'flags' in event

        flags = event.get('flags')

        mentioned = 'mentioned' in flags and 'read' not in flags
        private_message = event['message']['type'] == 'private'
        # stream_push_notify is set in process_message_event.
        stream_push_notify = event.get('stream_push_notify', False)
        stream_email_notify = event.get('stream_email_notify', False)

        stream_name = None
        if not private_message:
            stream_name = event['message']['display_recipient']

        # Since one is by definition idle, we don't need to check always_push_notify
        always_push_notify = False
        # Since we just GC'd the last event queue, the user is definitely idle.
        idle = True

        message_id = event['message']['id']
        # Pass on the information on whether a push or email notification was already sent.
        already_notified = dict(
            push_notified = event.get("push_notified", False),
            email_notified = event.get("email_notified", False),
        )
        maybe_enqueue_notifications(user_profile_id, message_id, private_message, mentioned,
                                    stream_push_notify, stream_email_notify, stream_name,
                                    always_push_notify, idle, already_notified)
def receiver_is_off_zulip(user_profile_id: int) -> bool:
    """True when the user has no open event queue that accepts message
    events — i.e. no active Zulip session, so we should notify them."""
    for descriptor in get_client_descriptors_for_user(user_profile_id):
        if descriptor.accepts_messages():
            return False
    return True
def maybe_enqueue_notifications(user_profile_id: int, message_id: int, private_message: bool,
                                mentioned: bool, stream_push_notify: bool,
                                stream_email_notify: bool, stream_name: Optional[str],
                                always_push_notify: bool, idle: bool,
                                already_notified: Dict[str, bool]) -> Dict[str, bool]:
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here.

    Enqueues push and/or email notification jobs for a message, skipping
    channels recorded in already_notified.  Returns a dict noting which
    queues were written to ('push_notified' / 'email_notified')."""
    notified = dict()  # type: Dict[str, bool]

    if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned or stream_email_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif stream_email_notify:
            notice['trigger'] = 'stream_email_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified['email_notified'] = True

    return notified
# Per-queue delivery record for one message: the client descriptor, the
# user's message flags, and whether this queue belongs to the sender.
ClientInfo = TypedDict('ClientInfo', {
    'client': ClientDescriptor,
    'flags': Optional[Iterable[str]],
    'is_sender': bool,
})
def get_client_info_for_message_event(event_template: Mapping[str, Any],
                                      users: Iterable[Mapping[str, Any]]) -> Dict[str, ClientInfo]:
    '''
    Return client info for all the clients interested in a message.

    This basically includes clients for users who are recipients
    of the message, with some nuances for bots that auto-subscribe
    to all streams, plus users who may be mentioned, etc.
    '''

    send_to_clients = {}  # type: Dict[str, ClientInfo]

    sender_queue_id = event_template.get('sender_queue_id', None)  # type: Optional[str]

    def is_sender_client(client: ClientDescriptor) -> bool:
        return (sender_queue_id is not None) and client.event_queue.id == sender_queue_id

    # If we're on a public stream, look for clients (typically belonging to
    # bots) that are registered to get events for ALL streams.
    if 'stream_name' in event_template and not event_template.get("invite_only"):
        realm_id = event_template['realm_id']
        for client in get_client_descriptors_for_realm_all_streams(realm_id):
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=[],
                is_sender=is_sender_client(client)
            )

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            # This overwrites any all-streams entry for the same queue id,
            # attaching the user's real flags instead of [].
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=flags,
                is_sender=is_sender_client(client)
            )

    return send_to_clients
def process_message_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
    """Deliver a newly sent message to every interested client.

    ``event_template`` carries the wide-format message payload plus
    routing metadata; ``users`` lists the recipient descriptors (id,
    flags, per-user notification settings).  As a side effect, this may
    enqueue push/email notifications for idle or mentioned recipients.
    """
    send_to_clients = get_client_info_for_message_event(event_template, users)

    presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
    wide_dict = event_template['message_dict']  # type: Dict[str, Any]

    sender_id = wide_dict['sender_id']  # type: int
    message_id = wide_dict['id']  # type: int
    message_type = wide_dict['type']  # type: str
    sending_client = wide_dict['client']  # type: str

    @cachify
    def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
        # Memoized per (apply_markdown, client_gravatar) pair so the wide
        # dict is deep-copied and finalized at most once per rendering flavor.
        dct = copy.deepcopy(wide_dict)
        MessageDict.finalize_payload(dct, apply_markdown, client_gravatar)
        return dct

    # Extra user-specific data to include
    extra_user_data = {}  # type: Dict[int, Any]

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        # If the recipient was offline and the message was a single or group PM to them
        # or they were @-notified potentially notify more immediately
        private_message = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags and 'read' not in flags
        stream_push_notify = user_data.get('stream_push_notify', False)
        stream_email_notify = user_data.get('stream_email_notify', False)

        # We first check if a message is potentially mentionable,
        # since receiver_is_off_zulip is somewhat expensive.
        if private_message or mentioned or stream_push_notify or stream_email_notify:
            idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
            always_push_notify = user_data.get('always_push_notify', False)
            stream_name = event_template.get('stream_name')
            result = maybe_enqueue_notifications(user_profile_id, message_id, private_message,
                                                 mentioned, stream_push_notify, stream_email_notify,
                                                 stream_name, always_push_notify, idle, {})
            result['stream_push_notify'] = stream_push_notify
            result['stream_email_notify'] = stream_email_notify
            extra_user_data[user_profile_id] = result

    for client_data in send_to_clients.values():
        client = client_data['client']
        flags = client_data['flags']
        is_sender = client_data.get('is_sender', False)  # type: bool
        extra_data = extra_user_data.get(client.user_profile_id, None)  # type: Optional[Mapping[str, bool]]

        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue

        message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)

        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True

        user_event = dict(type='message', message=message_dict, flags=flags)  # type: Dict[str, Any]
        if extra_data is not None:
            user_event.update(extra_data)

        if is_sender:
            # Echo the client-side id back so the sender's UI can match
            # its locally rendered message with the authoritative one.
            local_message_id = event_template.get('local_id', None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id

        if not client.accepts_event(user_event):
            continue

        # The below prevents (Zephyr) mirroring loops.
        if ('mirror' in sending_client and
                sending_client.lower() == client.client_type_name.lower()):
            continue

        client.add_event(user_event)
def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver a generic (non-message) event to every accepting client of
    each listed user, handing each client its own shallow copy."""
    for recipient_id in users:
        accepting = (descriptor
                     for descriptor in get_client_descriptors_for_user(recipient_id)
                     if descriptor.accepts_event(event))
        for descriptor in accepting:
            descriptor.add_event(dict(event))
def process_userdata_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
    """Fan out an event carrying per-user payload fields: for each user,
    overlay their extra fields (everything except 'id') onto a shallow
    copy of the template and deliver it to that user's accepting clients."""
    for user_data in users:
        recipient_id = user_data['id']
        per_user_event = dict(event_template)  # shallow copy is deep enough here
        per_user_event.update((field, value) for field, value in user_data.items()
                              if field != "id")
        for descriptor in get_client_descriptors_for_user(recipient_id):
            if descriptor.accepts_event(per_user_event):
                descriptor.add_event(per_user_event)
def process_message_update_event(event_template: Mapping[str, Any],
                                 users: Iterable[Mapping[str, Any]]) -> None:
    """Deliver a message-edit event to each user's clients and, where
    appropriate, enqueue notifications for users newly affected by the edit."""
    prior_mention_user_ids = set(event_template.get('prior_mention_user_ids', []))
    mention_user_ids = set(event_template.get('mention_user_ids', []))
    presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
    stream_push_user_ids = set(event_template.get('stream_push_user_ids', []))
    stream_email_user_ids = set(event_template.get('stream_email_user_ids', []))
    push_notify_user_ids = set(event_template.get('push_notify_user_ids', []))

    stream_name = event_template.get('stream_name')
    message_id = event_template['message_id']

    for user_data in users:
        user_profile_id = user_data['id']

        # Overlay the per-user fields (everything except 'id') onto a copy
        # of the shared template.
        user_event = dict(event_template)  # shallow copy, but deep enough for our needs
        for key in user_data.keys():
            if key != "id":
                user_event[key] = user_data[key]

        maybe_enqueue_notifications_for_message_update(
            user_profile_id=user_profile_id,
            message_id=message_id,
            stream_name=stream_name,
            prior_mention_user_ids=prior_mention_user_ids,
            mention_user_ids=mention_user_ids,
            presence_idle_user_ids=presence_idle_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            push_notify_user_ids=push_notify_user_ids,
        )

        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                client.add_event(user_event)
def maybe_enqueue_notifications_for_message_update(user_profile_id: int,
                                                   message_id: int,
                                                   stream_name: Optional[str],
                                                   prior_mention_user_ids: Set[int],
                                                   mention_user_ids: Set[int],
                                                   presence_idle_user_ids: Set[int],
                                                   stream_push_user_ids: Set[int],
                                                   stream_email_user_ids: Set[int],
                                                   push_notify_user_ids: Set[int]) -> None:
    """Maybe enqueue push/email notifications for one user after a message edit.

    Annotation fixes: ``user_profile_id`` is a user *id* (callers pass
    ``user_data['id']`` and it is tested for membership in the ``Set[int]``
    arguments), not a ``UserProfile`` instance; ``stream_name`` is ``None``
    for private messages, hence ``Optional[str]``.
    """
    private_message = (stream_name is None)
    if private_message:
        # We don't do offline notifications for PMs, because
        # we already notified the user of the original message
        return

    if (user_profile_id in prior_mention_user_ids):
        # Don't spam people with duplicate mentions. This is
        # especially important considering that most message
        # edits are simple typo corrections.
        return

    stream_push_notify = (user_profile_id in stream_push_user_ids)
    stream_email_notify = (user_profile_id in stream_email_user_ids)

    if stream_push_notify or stream_email_notify:
        # Currently we assume that if this flag is set to True, then
        # the user already was notified about the earlier message,
        # so we short circuit. We may handle this more rigorously
        # in the future by looking at something like an AlreadyNotified
        # model.
        return

    # We can have newly mentioned people in an updated message.
    mentioned = (user_profile_id in mention_user_ids)

    always_push_notify = user_profile_id in push_notify_user_ids

    idle = (user_profile_id in presence_idle_user_ids) or \
        receiver_is_off_zulip(user_profile_id)

    maybe_enqueue_notifications(
        user_profile_id=user_profile_id,
        message_id=message_id,
        private_message=private_message,
        mentioned=mentioned,
        stream_push_notify=stream_push_notify,
        stream_email_notify=stream_email_notify,
        stream_name=stream_name,
        always_push_notify=always_push_notify,
        idle=idle,
        already_notified={},
    )
def process_notification(notice: Mapping[str, Any]) -> None:
    """Dispatch one Tornado notification to the appropriate event processor,
    logging how long the dispatch took.

    ``notice['users']`` is a list of user ids, except for message-style
    events where it is a list of per-user dicts.
    """
    event = notice['event']  # type: Mapping[str, Any]
    users = notice['users']  # type: Union[List[int], List[Mapping[str, Any]]]
    start_time = time.time()
    if event['type'] == "message":
        process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event['type'] == "update_message":
        process_message_update_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event['type'] == "delete_message":
        process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
    else:
        process_event(event, cast(Iterable[int], users))
    # Lazy %-style arguments: the message is only formatted when DEBUG
    # logging is actually enabled (previously formatted eagerly with %).
    logging.debug("Tornado: Event %s for %s users took %sms",
                  event['type'], len(users), int(1000 * (time.time() - start_time)))
def send_notification_http(realm: Realm, data: Mapping[str, Any]) -> None:
    """Runs in the Django process to send a notification to Tornado.

    We use JSON rather than bare form parameters, so that we can represent
    different types and for compatibility with non-HTTP transports.
    """
    if not (settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO):
        # No separate Tornado server configured (or we are already inside
        # Tornado): process the notification directly.
        process_notification(data)
        return
    tornado_uri = get_tornado_uri(realm)
    payload = dict(data=ujson.dumps(data),
                   secret=settings.SHARED_SECRET)
    requests_client.post(tornado_uri + '/notify_tornado', data=payload)
def send_event(realm: Realm, event: Mapping[str, Any],
               users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    port = get_tornado_port(realm)

    def notify_over_http(*args: Any, **kwargs: Any) -> None:
        # Fallback invoked by the queue machinery; forwards to Tornado.
        send_notification_http(realm, *args, **kwargs)

    queue_json_publish(notify_tornado_queue_name(port),
                       dict(event=event, users=users),
                       notify_over_http)
| 44.732478 | 119 | 0.654566 |
6715f79511639f21c7c8b7b03d43f0af95b12802 | 1,520 | py | Python | src/script_convert.py | ybsat/NZDairyForecast | fba62dc4c89d2737af0fc986f413a4a4c0aaa585 | [
"MIT"
] | null | null | null | src/script_convert.py | ybsat/NZDairyForecast | fba62dc4c89d2737af0fc986f413a4a4c0aaa585 | [
"MIT"
] | null | null | null | src/script_convert.py | ybsat/NZDairyForecast | fba62dc4c89d2737af0fc986f413a4a4c0aaa585 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import os
import json
import pandas as pd
# must correct the path"
with open("trading_data2.json", "r") as read_file:
data = json.load(read_file)
row = data[0]
temp = {k: [v] for k, v in row['EventSummary'].items()}
events = pd.DataFrame.from_dict(temp)
events = events.drop(events.index[0])
prdrows = row['ProductGroupResult']
subrow = prdrows[0]
temp = {k: [v] for k, v in subrow.items()}
prdgrps = pd.DataFrame.from_dict(temp)
prdgrps['key'] = ''
prdgrps = prdgrps.drop(prdgrps.index[0])
for row in data:
key = row['key']
temp = {k: [v] for k, v in row['EventSummary'].items()}
event = pd.DataFrame.from_dict(temp)
events = pd.concat([events, event], ignore_index=True)
for prd in row['ProductGroupResult']:
temp = {k: [v] for k, v in prd.items()}
prdgrp = pd.DataFrame.from_dict(temp)
prdgrp['key'] = key
prdgrps = pd.concat([prdgrps,prdgrp], ignore_index=True)
prdgrps = prdgrps.loc[prdgrps.ProductGroupCode.isin(['AMF','WMP','Butter','BMP','SMP']),: ]
events = events.set_index("EventGUID")
prdgrps.rename(columns={'key':'EventGUID'}, inplace=True)
#prdsgrps = prdgrps.set_index(["EventGUID","ProductGroupCode"])
events = events.reset_index()
joined = prdgrps.set_index("EventGUID").join(events.set_index("EventGUID"),lsuffix = '_caller', rsuffix='_other')
prdgrps.to_csv('products.csv')
events.to_csv('events.csv')
joined.to_csv('python_joined.csv',index = False)
| 27.636364 | 113 | 0.678289 |
e63cdcbce0a2d19a7d44f756a261a1022c2c513d | 3,933 | py | Python | test/unit/config/test_reload_config.py | julozi/galaxy | 90d9da03975f254ac128747cd04532c3595d6155 | [
"CC-BY-3.0"
] | 2 | 2017-03-28T12:11:41.000Z | 2017-04-22T02:58:25.000Z | test/unit/config/test_reload_config.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 2 | 2019-04-03T15:37:17.000Z | 2019-04-03T19:37:09.000Z | test/unit/config/test_reload_config.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 1 | 2019-01-16T22:21:54.000Z | 2019-01-16T22:21:54.000Z | from galaxy import config
from galaxy.config import reload_config_options
# Shorthand config-option names used throughout these tests: the R*
# options are registered as reloadable by the mock below, the N* ones are not.
R1, R2, N1, N2 = 'reloadable1', 'reloadable2', 'nonrelodable1', 'nonreloadable2'  # config options
class MockGalaxyAppConfiguration():
    """Minimal stand-in for GalaxyAppConfiguration: exposes the attributes
    and the ``update_reloadable_property`` hook that
    ``reload_config_options`` relies on."""

    def __init__(self, properties):
        self.config_file = None
        self.reloadable_options = {R1, R2}
        # Snapshot of the "file" values, mirrored as instance attributes.
        self._raw_config = dict(properties)
        for option, value in properties.items():
            setattr(self, option, value)

    def update_reloadable_property(self, key, value):
        setattr(self, key, value)
def test_update_property(monkeypatch):
    # This also covers adding a property. When a config file does not set a property,
    # that property is set to its default value. Thus, if we add a reloadable property
    # to the config file, it's the same as modifying that property's value.
    appconfig = MockGalaxyAppConfiguration({R1: 1, R2: 2, N1: 3})

    def fake_reader(values):
        return {R1: 1, R2: 42, N1: 99}  # edits: R2, N1 modified

    monkeypatch.setattr(config, 'read_properties_from_file', fake_reader)

    assert (getattr(appconfig, R1), getattr(appconfig, R2), getattr(appconfig, N1)) == (1, 2, 3)

    reload_config_options(appconfig)

    assert getattr(appconfig, R1) == 1   # no change
    assert getattr(appconfig, R2) == 42  # change: reloadable option modified
    assert getattr(appconfig, N1) == 3   # no change: option modified but is non-relodable
def test_overwrite_reloadable_attribute(monkeypatch):
    # This is similar to test_update_property, but here we overwrite the attribute before reloading.
    # This can happen if a config property is modified AFTER it has been loaded from schema or kwargs.
    # For example: load `foo` (from schema or kwargs), but then, in a subsequent step while initializing
    # GalaxyAppConfiguration, do something like this: `foo = resolve_path(foo, bar)`. Now the value of `foo`
    # is not what was initially loaded, and if `foo` is reloadable, it will be reset to its default as soon
    # as the config file is modified. To prevent this, we compare the values read from the modified file
    # to the `_raw_config` dict. This test ensures this works correctly.
    appconfig = MockGalaxyAppConfiguration({R1: 1, R2: 2, N1: 3})

    def mock_read_properties_from_file(values):
        return {R1: 1, R2: 42}  # edits: R2 modified

    monkeypatch.setattr(config, 'read_properties_from_file', mock_read_properties_from_file)

    assert getattr(appconfig, R1) == 1
    assert getattr(appconfig, R2) == 2

    # overwrite R1 (simulates a post-load mutation of a reloadable option)
    setattr(appconfig, R1, 99)
    assert getattr(appconfig, R1) == 99

    # then reload
    reload_config_options(appconfig)

    assert getattr(appconfig, R1) == 99  # no change; should remain overwritten
    assert getattr(appconfig, R2) == 42  # change: reloadable option modified
def test_cant_delete_property(monkeypatch):
    """A property removed from the config file must stay untouched: we
    cannot know whether it originally came from a default, the config
    file, an env var, etc., so reloading never modifies or deletes it."""
    appconfig = MockGalaxyAppConfiguration({R1: 1, R2: 2, N1: 3, N2: 4})

    def fake_reader(values):
        return {R1: 1, N1: 3}  # edits: R2, N2 deleted

    monkeypatch.setattr(config, 'read_properties_from_file', fake_reader)

    before = {option: getattr(appconfig, option) for option in (R1, R2, N1, N2)}
    assert before == {R1: 1, R2: 2, N1: 3, N2: 4}

    reload_config_options(appconfig)

    # Nothing changed: deletions in the file are ignored on reload.
    for option, expected in before.items():
        assert getattr(appconfig, option) == expected
| 42.75 | 107 | 0.707094 |
596ebbfe48a7d049af468bffad9c7f0e53cfbe1f | 363 | py | Python | tests/numeric-utils/test_clamp.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | tests/numeric-utils/test_clamp.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | tests/numeric-utils/test_clamp.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | import pytest
from platon_utils.numeric import clamp
@pytest.mark.parametrize(
    "lower_bound,upper_bound,value,expected",
    ((5, 7, 4, 5), (5, 7, 5, 5), (5, 7, 6, 6), (5, 7, 7, 7), (5, 7, 8, 7)),
)
def test_numeric_clamp_utility(lower_bound, upper_bound, value, expected):
    """clamp() pins *value* into the closed range [lower_bound, upper_bound]."""
    assert clamp(lower_bound, upper_bound, value) == expected
| 27.923077 | 75 | 0.672176 |
e9d8bd0f1f585936d45b4652fc90cdfbb0ee6e8d | 5,789 | py | Python | transpiler.py | MiguelRobledo/workshop.py | 0159eb752e3154d4eb051fdf242a9d0157ab0887 | [
"MIT"
] | 1 | 2019-05-28T15:01:13.000Z | 2019-05-28T15:01:13.000Z | transpiler.py | MiguelRobledo/workshop.py | 0159eb752e3154d4eb051fdf242a9d0157ab0887 | [
"MIT"
] | null | null | null | transpiler.py | MiguelRobledo/workshop.py | 0159eb752e3154d4eb051fdf242a9d0157ab0887 | [
"MIT"
] | null | null | null | import io
import re
import uuid
import tokenize
from helper import casify
class Captor:
    """Collects the tokens that appear between a start token and an end token.

    While idle, it watches the stream for ``start_tok``; once that is seen,
    every subsequent ``(type, string)`` pair is buffered until ``end_tok``
    arrives, at which point ``output_func`` is called with the buffer and
    its result is returned.  ``__call__`` returns ``(consumed, output)``:
    ``consumed`` tells the caller whether to drop the token from the
    passthrough stream, ``output`` is ``None`` except on the end token.
    """

    def __init__(self, output_func, start_tok, end_tok, remove_start=True, remove_end=False):
        self.start_tok = start_tok
        self.end_tok = end_tok
        self.output_func = output_func
        self.remove_start = remove_start
        self.remove_end = remove_end
        self.capturing = False
        self.toks = []

    def check_tok(self, check_tok, tok):
        # INDENT/DEDENT markers match on token type alone; any other
        # token must match both type and exact string.
        if check_tok[0] in (tokenize.INDENT, tokenize.DEDENT):
            return tok.type == check_tok[0]
        return tok.type == check_tok[0] and tok.string == check_tok[1]

    def __call__(self, tok):
        if not self.capturing:
            if self.check_tok(self.start_tok, tok):
                self.capturing = True
                return self.remove_start, None
            return False, None
        if not self.check_tok(self.end_tok, tok):
            self.toks.append(tok[:2])
            return True, None
        captured = self.output_func(self.toks)
        self.toks = []
        self.capturing = False
        return self.remove_end, captured
class PassthroughCaptor:
    """Stays dormant until ``start_tok`` appears, then delegates every
    subsequent token to an inner Captor that records the span between
    ``start_capture_tok`` and ``end_capture_tok``.  Goes dormant again
    once the inner captor produces its output."""

    def __init__(self, output_func, start_tok, start_capture_tok, end_capture_tok, remove_end=False):
        self.start_tok = start_tok
        self.captor = Captor(output_func, start_capture_tok, end_capture_tok, False, remove_end)
        self.watching = False

    def __call__(self, tok):
        if not self.watching:
            # Arm the inner captor once the trigger token goes by; the
            # trigger itself is never consumed.
            if self.captor.check_tok(self.start_tok, tok):
                self.watching = True
            return False, None
        consumed, captured = self.captor(tok)
        if captured is not None:
            self.watching = False
        return consumed, captured
class Switch:
    """Mutable state for the switch/case block currently being transpiled:
    the switch's name tokens, the identifier of the previously emitted
    case function, and whether the next case is the first one."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Return to a pristine state, ready for the next switch block.
        self.name = None
        self.prev_func = None
        self.is_first = True
def transpile(code, case):
    """Transpile ``rule``/``switch``/``case`` pseudo-syntax into plain Python.

    ``rule NAME:`` becomes a ``@Rule(NAME)``-decorated function whose
    parameters are the casified "set event"/"add condition"/"add action"
    identifiers; each ``case COND:`` inside a ``switch NAME:`` becomes an
    ``@If(COND, <previous case>)``-decorated function, and the switch name
    is finally bound to the result of calling the last case function.

    Fix: an unreachable token-list expression that followed the
    ``return []`` inside ``switch_name`` has been removed (dead code).

    :param code: source text containing the pseudo-keywords
    :param case: casing convention forwarded to ``casify``
    :return: the transformed source as a string
    """
    # returns tokens from a string
    def to_tokens(string):
        return tokenize.tokenize(io.BytesIO(string.encode("utf-8")).readline)

    # returns a string from tokens
    def to_string(toks):
        return tokenize.untokenize(toks)

    # returns a legal identifier from a string
    def to_identifier(string):
        return re.sub(r"[^A-Za-z0-9_]", "", re.sub(r"(\s|-|=)+", "_", string)).replace("_", " ").strip().replace(" ", "_")

    result = []
    indent = 0
    switch_indent = -1

    # data for the current switch
    curr_switch = Switch()

    # token generators
    def rule_keyword(rule_name):
        # "rule NAME:" -> "@Rule(NAME)\ndef _NAME_<uuid>(arg0, arg1, arg2)"
        arguments = [ casify(arg, case) for arg in [ "set event", "add condition", "add action" ] ]
        identifier = "_" + to_identifier(to_string(rule_name)) + "_" + uuid.uuid4().hex
        return [
            ( tokenize.OP, "@" ),
            ( tokenize.NAME, "Rule" ),
            ( tokenize.OP, "(" ),
        ] + rule_name + [
            ( tokenize.OP, ")" ),
            ( tokenize.NEWLINE, "\n" ),
            ( tokenize.NAME, "def" ),
            ( tokenize.NAME, identifier ),
            ( tokenize.OP, "(" ),
            ( tokenize.NAME, arguments[0] ),
            ( tokenize.OP, "," ),
            ( tokenize.NAME, arguments[1] ),
            ( tokenize.OP, ",", ),
            ( tokenize.NAME, arguments[2] ),
            ( tokenize.OP, ")")
        ]

    def switch_name(name):
        # Only remember the switch's name tokens; the assignment wiring the
        # name to the generated case chain is emitted on DEDENT in the main
        # loop below.  (Previously an unreachable token list followed the
        # return statement here; it has been deleted.)
        curr_switch.name = name
        return []

    def case_condition(condition):
        # Each case becomes "@If(cond, <prev case fn>)\ndef _if_<uuid>()";
        # a bare "case:" gets the condition None.
        identifier = "_if_" + uuid.uuid4().hex
        function_id = None if curr_switch.is_first else curr_switch.prev_func
        cond = condition if condition else [ ( tokenize.NAME, "None" ) ]
        curr_switch.prev_func = identifier
        curr_switch.is_first = False
        return [
            ( tokenize.DEDENT, "" ),
            ( tokenize.OP, "@" ),
            ( tokenize.NAME, "If" ),
            ( tokenize.OP, "(" ),
        ] + cond + [
            ( tokenize.OP, "," ),
            ( tokenize.NAME, str(function_id) ),
            ( tokenize.OP, ")" ),
            ( tokenize.NEWLINE, "\n" ),
            ( tokenize.NAME, "def" ),
            ( tokenize.NAME, identifier ),
            ( tokenize.OP, "(" ),
            ( tokenize.OP, ")" ),
            ( tokenize.INDENT, (indent - 1) * "\t" ),
        ]

    def case_body(body):
        # Wrap the captured statements into "return [stmt, stmt, ...]",
        # inserting a comma wherever the original body had a newline.
        b = []
        for tok in body:
            if tok[0] == tokenize.NEWLINE:
                b.append(( tokenize.OP, "," ))
            b.append(tok)
        return [
            ( tokenize.INDENT, indent * "\t" ),
            ( tokenize.NAME, "return" ),
            ( tokenize.OP, "[" ),
            ( tokenize.NEWLINE, "\n" ),
            ( tokenize.INDENT, (indent + 1) * "\t" )
        ] + b + [
            ( tokenize.DEDENT, "" ),
            ( tokenize.OP, "]" ),
            ( tokenize.DEDENT, "" ),
            ( tokenize.NEWLINE, "\n" )
        ]

    rule_name_captor = Captor(rule_keyword, (tokenize.NAME, "rule"), (tokenize.OP, ":"))
    switch_name_captor = Captor(switch_name, (tokenize.NAME, "switch"), (tokenize.OP, ":"), remove_end=True)
    case_condition_captor = Captor(case_condition, (tokenize.NAME, "case"), (tokenize.OP, ":"))
    case_body_captor = PassthroughCaptor(case_body, (tokenize.NAME, "case"), (tokenize.INDENT, ""), (tokenize.DEDENT, ""))
    captors = [ rule_name_captor, switch_name_captor, case_condition_captor, case_body_captor ]

    def run_captors(captors, tok):
        # Feed the token to every captor; 'cont' tells the main loop to drop
        # the token, 'out' accumulates any generated replacement tokens.
        cont = False
        out = []
        for captor in captors:
            c, o = captor(tok)
            cont |= c
            if o is not None:
                out.extend(o)
        return cont, out

    for tok in to_tokens(code):
        if tok.type == tokenize.ENCODING:
            # Always the first token tokenize emits; remembered so the final
            # byte output can be decoded back to text below.
            encoding = tok.string
        if tok.type == tokenize.INDENT:
            indent += 1
        if tok.type == tokenize.DEDENT:
            indent -= 1
        if tok.type == tokenize.NAME and tok.string == "switch":
            switch_indent = indent
        if tok.type == tokenize.DEDENT and indent == switch_indent:
            # The switch block just closed: bind its name to the result of
            # calling the last generated case function, then reset state.
            switch_indent = -1
            result.extend(curr_switch.name + [
                ( tokenize.OP, "=" ),
                ( tokenize.NAME, curr_switch.prev_func ),
                ( tokenize.OP, "(" ),
                ( tokenize.OP, ")" ),
                ( tokenize.NEWLINE, "\n" )
            ])
            curr_switch.reset()
        cont, out = run_captors(captors, tok)
        if out is not None:
            result.extend(out)
        if cont:
            continue
        result.append(tok[:2])
    return to_string(result).decode(encoding)
b5ab80f6123f3c393659e6accdce375c114121f8 | 1,859 | py | Python | python/dgllife/model/pretrain/property_prediction.py | padr31/dgl-lifesci | 932581468b330862836c0f050077fa33d0eb9405 | [
"Apache-2.0"
] | 390 | 2020-06-05T13:16:18.000Z | 2022-03-31T07:36:34.000Z | python/dgllife/model/pretrain/property_prediction.py | padr31/dgl-lifesci | 932581468b330862836c0f050077fa33d0eb9405 | [
"Apache-2.0"
] | 71 | 2020-06-12T05:26:56.000Z | 2022-03-29T06:26:39.000Z | python/dgllife/model/pretrain/property_prediction.py | padr31/dgl-lifesci | 932581468b330862836c0f050077fa33d0eb9405 | [
"Apache-2.0"
] | 113 | 2020-06-08T18:48:18.000Z | 2022-03-22T01:16:26.000Z | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable= no-member, arguments-differ, invalid-name
#
# Pre-trained models for molecular property prediction
from ..gnn.gin import GIN
from ..model_zoo.attentivefp_predictor import AttentiveFPPredictor
# Public API of this module.
__all__ = ['property_url',
           'create_property_model']

# Maps each supported pre-trained model name to the path of its
# checkpoint inside the dgllife pre-trained model storage.
property_url = {
    'AttentiveFP_Aromaticity': 'dgllife/pre_trained/attentivefp_aromaticity.pth',
    'gin_supervised_contextpred': 'dgllife/pre_trained/gin_supervised_contextpred.pth',
    'gin_supervised_infomax': 'dgllife/pre_trained/gin_supervised_infomax.pth',
    'gin_supervised_edgepred': 'dgllife/pre_trained/gin_supervised_edgepred.pth',
    'gin_supervised_masking': 'dgllife/pre_trained/gin_supervised_masking.pth'
}
def create_property_model(model_name):
    """Create a model.

    Parameters
    ----------
    model_name : str
        Name for the model.

    Returns
    -------
    Created model, or ``None`` when ``model_name`` is not recognized.
    """
    gin_variants = {'gin_supervised_contextpred', 'gin_supervised_infomax',
                    'gin_supervised_edgepred', 'gin_supervised_masking'}
    if model_name == 'AttentiveFP_Aromaticity':
        return AttentiveFPPredictor(node_feat_size=39,
                                    edge_feat_size=10,
                                    num_layers=2,
                                    num_timesteps=2,
                                    graph_feat_size=200,
                                    n_tasks=1,
                                    dropout=0.2)
    if model_name in gin_variants:
        return GIN(num_node_emb_list=[120, 3],
                   num_edge_emb_list=[6, 3],
                   num_layers=5,
                   emb_dim=300,
                   JK='last',
                   dropout=0.5)
    return None
| 33.196429 | 87 | 0.600861 |
dcf9e612475b9d80d6feb55c100117285f613eea | 4,271 | py | Python | score/consonance.py | ljumbam/Score | 52dfa782e1e2e6b7b4c441ebbd223e7d305ecf8e | [
"MIT"
] | null | null | null | score/consonance.py | ljumbam/Score | 52dfa782e1e2e6b7b4c441ebbd223e7d305ecf8e | [
"MIT"
] | null | null | null | score/consonance.py | ljumbam/Score | 52dfa782e1e2e6b7b4c441ebbd223e7d305ecf8e | [
"MIT"
] | null | null | null | """
Chord consonance based on frequency ratio
"""
from fractions import Fraction
from itertools import combinations
from score.base import ScoreObject
from score.note import Note
"""
TODO: Experiment with consonance. Consider always setting a tonic, and
getting the consonance of 3 or more notes by the theory described in
http://ray.tomes.biz/alex.htm
"""
class FrequencyRatios(ScoreObject):
    """
    Frequency ratios of the 12 notes that fall
    within an octave in an equal temperament scale
    """

    def __init__(self):
        # One ratio per chromatic step, ordered from the unison upwards.
        self._ratios = [
            Fraction(1, 1), Fraction(15, 16), Fraction(8, 9),
            Fraction(5, 6), Fraction(4, 5), Fraction(3, 4),
            Fraction(5, 7), Fraction(2, 3), Fraction(5, 8),
            Fraction(3, 5), Fraction(9, 16), Fraction(8, 15),
        ]
        # Per-ratio error values, same order as _ratios.
        # NOTE(review): units are not stated in this file — presumably
        # cents vs. the tempered interval; confirm before relying on them.
        self._errors = [0, 10, 4, 16, 14, 2, 17, 2, 14, 16, 2, 12]

    @property
    def ratios(self):
        """Read-only list of interval frequency ratios."""
        return self._ratios

    @property
    def errors(self):
        """Read-only list of per-interval error values."""
        return self._errors
class Consonance(FrequencyRatios):
    """Main purpose is to return a measure of the consonance
    between the tonic and another note.

    Example:
        >>> tonic = Note('C')
        >>> nte = Note('E')
        >>> csn = Consonance(tonic=tonic)
        >>> print (csn.get_consonance(nte))
        20
    """
    def __init__(self, tonic=Note('C')):
        # NOTE(review): the default tonic is a single Note instance created
        # at import time and shared across calls — fine only as long as
        # Note objects are never mutated; confirm.
        super(Consonance, self).__init__()
        self._tonic = None
        self._chromatic_scale = None
        self.tonic = tonic  # property setter also builds the chromatic scale

    def get_consonance(self, nte):
        """Returns a measure of the consonance between the
        tonic note of this class and the input, nte. The consonance
        is a product of the frequency ratios between both notes.
        The smaller the product, the higher the consonance
        """
        self.validate_type(nte, Note)
        note_number = Note.get_base_note_number(nte.name)
        note_index = self._chromatic_scale.index(note_number)
        ratio = self.ratios[note_index]
        if ratio == float('inf'):
            # Defensive guard; the ratios defined above are all finite.
            return ratio
        else:
            consonance = ratio.numerator * ratio.denominator
            consonance = consonance  # + consonance*self.errors[note_index] # Performance is better without adding the errors
            return consonance

    @property
    def tonic(self):
        return self._tonic

    @tonic.setter
    def tonic(self, tonic):
        self.validate_type(tonic, Note)
        self._tonic = tonic
        # Build the 12-entry chromatic scale (as base note numbers)
        # starting from the tonic; get_consonance() indexes into it.
        self._chromatic_scale = []
        for num in range(0, 12):
            #note_name = self.strip_digits(Note(tonic.number + num).name)
            nte = Note(tonic.number + num)
            note_number = Note.get_base_note_number(nte.name)
            self._chromatic_scale.append(note_number)

    @property
    def chromatic_scale(self):
        """Chromatic scale by base note number
        """
        return self._chromatic_scale
class ChordConsonance(ScoreObject):
    """Takes in a list of notes and returns a
    consonance value

    Example:
        >>> con = ChordConsonance(notes=[Note('C'), Note('E')])
        >>> print (con.consonance)
        20
    """
    def __init__(self, notes=None):
        # Fix: the previous signature used a mutable default (notes=[]),
        # which was stored directly on the instance and therefore shared
        # across every instance constructed without arguments.
        self._notes = []
        self._consonance = 0
        self.notes = notes if notes is not None else []

    def _set_consonance(self):
        # Deduplicate base note numbers, preserving first-seen order.
        note_list = []
        for nte in self._notes:
            number = Note.get_base_note_number(nte.name)
            if number not in note_list:
                note_list.append(number)
        # Sum the pairwise consonances over every unordered pair.
        total_consonance = 0
        for note_pair in combinations(note_list, 2):
            # smaller note should always be tonic
            tonic = Note(min(note_pair))
            nte = Note(max(note_pair))
            consonance = Consonance(tonic=tonic)
            total_consonance += consonance.get_consonance(nte)
        self._consonance = total_consonance

    @property
    def consonance(self):
        """Total pairwise consonance of the current note set."""
        return self._consonance

    @property
    def notes(self):
        return self._notes

    @notes.setter
    def notes(self, notes):
        # Validate the whole list, then recompute the cached consonance.
        self.validate_type(notes, list)
        for nte in notes:
            self.validate_type(nte, Note)
        self._notes = notes
        self._set_consonance()
def main():
    # Intentionally empty: this module is used as a library; the guard
    # below only exists so the file can also be executed directly.
    pass


if __name__ == '__main__':
    main()
| 29.253425 | 124 | 0.610162 |
1621e7a656b53ff777c579ccb79006f2b5633a7a | 7,297 | py | Python | tests/test_transforms/test_scalers_transform.py | Carlosbogo/etna | b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94 | [
"Apache-2.0"
] | 1 | 2021-11-11T21:18:42.000Z | 2021-11-11T21:18:42.000Z | tests/test_transforms/test_scalers_transform.py | Carlosbogo/etna | b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94 | [
"Apache-2.0"
] | null | null | null | tests/test_transforms/test_scalers_transform.py | Carlosbogo/etna | b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94 | [
"Apache-2.0"
] | null | null | null | from typing import Any
from typing import List
from typing import Optional
from typing import Union
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.transforms import MaxAbsScalerTransform
from etna.transforms import MinMaxScalerTransform
from etna.transforms import RobustScalerTransform
from etna.transforms import StandardScalerTransform
from etna.transforms.sklearn import SklearnTransform
from etna.transforms.sklearn import TransformMode
class DummySkTransform:
    """Identity scikit-learn-style transformer: ``fit`` is a no-op while
    ``transform`` and ``inverse_transform`` return their input unchanged."""

    def fit(self, X, y=None):  # noqa: N803
        return None

    def transform(self, X, y=None):  # noqa: N803
        return X

    def inverse_transform(self, X, y=None):  # noqa: N803
        return X
class DummyTransform(SklearnTransform):
    """SklearnTransform wired to the identity DummySkTransform, used to
    exercise the transform machinery without any actual scaling."""

    def __init__(
        self,
        in_column: Optional[Union[str, List[str]]] = None,
        inplace: bool = True,
        out_column: str = None,
        mode: Union[TransformMode, str] = "per-segment",
    ):
        self.in_column = in_column
        self.inplace = inplace
        self.out_column = out_column
        self.mode = TransformMode(mode)
        # The repr() is only computed when no explicit output column is given.
        resolved_out_column = self.__repr__() if out_column is None else out_column
        super().__init__(
            in_column=in_column,
            inplace=inplace,
            out_column=resolved_out_column,
            transformer=DummySkTransform(),
            mode=mode,
        )
@pytest.fixture
def normal_distributed_df() -> pd.DataFrame:
    """Two-segment wide dataset with gaussian 'target'/'exog' columns drawn
    from a fixed seed so the tests are reproducible."""
    timestamps = pd.date_range("2021-06-01", "2021-07-01", freq="1d")
    moscow = pd.DataFrame.from_dict({"timestamp": timestamps})
    omsk = pd.DataFrame.from_dict({"timestamp": timestamps})
    generator = np.random.RandomState(seed=1)
    # Keep the RNG draw order (moscow target/exog, then omsk target/exog)
    # so the generated values match the original fixture exactly.
    moscow["segment"] = "Moscow"
    moscow["target"] = generator.normal(loc=0, scale=10, size=len(moscow))
    moscow["exog"] = generator.normal(loc=2, scale=10, size=len(moscow))
    omsk["segment"] = "Omsk"
    omsk["target"] = generator.normal(loc=5, scale=1, size=len(omsk))
    omsk["exog"] = generator.normal(loc=3, scale=1, size=len(omsk))
    classic_df = pd.concat([moscow, omsk], ignore_index=True)
    return TSDataset.to_dataset(classic_df)
@pytest.mark.parametrize(
    "scaler",
    (
        StandardScalerTransform,
        RobustScalerTransform,
        MinMaxScalerTransform,
        MaxAbsScalerTransform,
        StandardScalerTransform,
        RobustScalerTransform,
        MinMaxScalerTransform,
    ),
)
def test_transform_invalid_mode(scaler):
    """Check scaler behavior in case of invalid transform mode"""
    # Constructing any scaler with an unknown mode string must raise.
    with pytest.raises(ValueError):
        scaler(mode="a")
@pytest.mark.parametrize(
    "scaler",
    (
        DummyTransform(),
        StandardScalerTransform(),
        RobustScalerTransform(),
        MinMaxScalerTransform(),
        MaxAbsScalerTransform(),
        StandardScalerTransform(with_std=False),
        RobustScalerTransform(with_centering=False, with_scaling=False),
        MinMaxScalerTransform(feature_range=(5, 10)),
    ),
)
@pytest.mark.parametrize("mode", ("macro", "per-segment"))
def test_dummy_inverse_transform_all_columns(normal_distributed_df, scaler, mode):
    """Check that `inverse_transform(transform(df)) == df` for all columns."""
    scaler.mode = TransformMode(mode)
    feature_df = scaler.fit_transform(df=normal_distributed_df.copy())
    inversed_df = scaler.inverse_transform(df=feature_df.copy())
    # The round trip must reproduce the original values (float tolerance).
    npt.assert_array_almost_equal(normal_distributed_df.values, inversed_df.values)
@pytest.mark.parametrize(
    "scaler",
    (
        DummyTransform(in_column="target"),
        StandardScalerTransform(in_column="target"),
        RobustScalerTransform(in_column="target"),
        MinMaxScalerTransform(in_column="target"),
        MaxAbsScalerTransform(in_column="target"),
        StandardScalerTransform(in_column="target", with_std=False),
        RobustScalerTransform(in_column="target", with_centering=False, with_scaling=False),
        MinMaxScalerTransform(in_column="target", feature_range=(5, 10)),
    ),
)
@pytest.mark.parametrize("mode", ("macro", "per-segment"))
def test_dummy_inverse_transform_one_column(normal_distributed_df, scaler, mode):
    """Check that `inverse_transform(transform(df)) == df` for one column."""
    scaler.mode = TransformMode(mode)
    feature_df = scaler.fit_transform(df=normal_distributed_df.copy())
    inversed_df = scaler.inverse_transform(df=feature_df)
    # Only 'target' is scaled, but the whole frame must round-trip intact.
    npt.assert_array_almost_equal(normal_distributed_df.values, inversed_df.values)
@pytest.mark.parametrize(
    "scaler",
    (
        # Fix: the tuple previously listed StandardScaler/RobustScaler/MinMaxScaler
        # twice each with identical arguments, running the same cases redundantly.
        DummyTransform,
        StandardScalerTransform,
        RobustScalerTransform,
        MinMaxScalerTransform,
        MaxAbsScalerTransform,
    ),
)
@pytest.mark.parametrize("mode", ("macro", "per-segment"))
def test_inverse_transform_not_inplace(normal_distributed_df, scaler, mode):
    """Check that inversed values are the same for the not-inplace version.

    The not-inplace transform writes its output under columns named after the
    transform's repr, so the inplace result is relabeled before comparison.
    """
    inplace_scaler = scaler(mode=mode)
    not_inplace_scaler = scaler(inplace=False, mode=mode)
    columns_to_compare = pd.MultiIndex.from_tuples(
        [(segment_name, repr(not_inplace_scaler)) for segment_name, _ in normal_distributed_df.columns]
    )
    inplace_feature_df = inplace_scaler.fit_transform(df=normal_distributed_df.copy())
    not_inplace_feature_df = not_inplace_scaler.fit_transform(df=normal_distributed_df.copy())
    inplace_feature_df.columns = columns_to_compare
    # Compare raw ndarrays on both sides (the original passed a DataFrame on one side).
    npt.assert_array_almost_equal(
        inplace_feature_df.loc[:, columns_to_compare].values,
        not_inplace_feature_df.loc[:, columns_to_compare].values,
    )
@pytest.mark.parametrize(
    "scaler",
    (
        # Fix: removed duplicated StandardScaler/RobustScaler/MinMaxScaler entries
        # that made pytest run identical cases twice.
        DummyTransform,
        StandardScalerTransform,
        RobustScalerTransform,
        MinMaxScalerTransform,
        MaxAbsScalerTransform,
    ),
)
@pytest.mark.parametrize("mode", ("macro", "per-segment"))
def test_interface_out_column(normal_distributed_df: pd.DataFrame, scaler: Any, mode: str):
    """Check transform interface in non-inplace mode with a given out_column param.

    Every segment of the result must contain a column named after ``out_column``.
    """
    out_column = "result"
    transform = scaler(inplace=False, mode=mode, out_column=out_column)
    result = transform.fit_transform(df=normal_distributed_df)
    for segment in result.columns.get_level_values("segment").unique():
        assert out_column in result[segment].columns
@pytest.mark.parametrize(
    "scaler",
    (
        # Fix: removed duplicated StandardScaler/RobustScaler/MinMaxScaler entries
        # that made pytest run identical cases twice.
        DummyTransform,
        StandardScalerTransform,
        RobustScalerTransform,
        MinMaxScalerTransform,
        MaxAbsScalerTransform,
    ),
)
@pytest.mark.parametrize("mode", ("macro", "per-segment"))
def test_interface_repr(normal_distributed_df: pd.DataFrame, scaler: Any, mode: str):
    """Check transform interface in non-inplace mode without a given out_column param.

    When ``out_column`` is omitted, the output column is named after the
    transform's repr; verify that name appears in every segment.
    """
    transform = scaler(inplace=False, mode=mode)
    expected_column = repr(transform)  # fixed misspelled local name and dunder call
    result = transform.fit_transform(df=normal_distributed_df)
    for segment in result.columns.get_level_values("segment").unique():
        assert expected_column in result[segment].columns
| 35.945813 | 111 | 0.707825 |
f86ba51f4ebdaea70378b2257c6887129089706e | 16,208 | py | Python | libradtran/spectral_options.py | NelisW/libraddask | 3c622ec0010d79aee210d2ce7cf2b6cba79c1835 | [
"MIT"
] | 4 | 2020-04-20T21:31:25.000Z | 2022-03-23T11:33:13.000Z | libradtran/spectral_options.py | NelisW/libraddask | 3c622ec0010d79aee210d2ce7cf2b6cba79c1835 | [
"MIT"
] | null | null | null | libradtran/spectral_options.py | NelisW/libraddask | 3c622ec0010d79aee210d2ce7cf2b6cba79c1835 | [
"MIT"
] | 1 | 2021-05-23T05:09:24.000Z | 2021-05-23T05:09:24.000Z |
"""--------------------------------------------------------------------
* $Id: spectral_options.py 3145 2015-08-04 13:35:30Z josef.gasteiger $
*
* This file is part of libRadtran.
* Copyright (c) 1997-2012 by Arve Kylling, Bernhard Mayer,
* Claudia Emde, Robert Buras
*
* ######### Contact info: http://www.libradtran.org #########
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*--------------------------------------------------------------------"""
#from . import option_definition
#from . import GUI_definition
from libraddask.libradtran import option_definition
from libraddask.libradtran import GUI_definition
import io
class setup_spectral_group():
    """Declarative definitions of the 'Spectral/Source' uvspec option group.

    Builds `option_definition.option` objects for the spectral input options
    (wavelength range/index/grid, extraterrestrial source, thermal bands,
    MYSTIC lidar/radar switches), wiring together their GUI inputs, parser
    tokens, and the LaTeX documentation from get_spectral_documentation().
    """
    group_name = 'Spectral/Source'
    def __init__(self):
        """Construct every option object and collect the active ones in ``self.options``."""
        # LaTeX help text keyed by option name.
        documentation = get_spectral_documentation()
        # Wavelength range: first and last wavelength in nm.
        wavelength = option_definition.option(
            name='wavelength',
            group='spectral',
            helpstr='Set the wavelength range by specifying first and last wavelength in nm.',
            documentation=documentation['wavelength'],
            gui_inputs=(GUI_definition.FloatInput(name='Input.wl.start', default=None, valid_range=[0, 1000000.0]),
                GUI_definition.FloatInput(name='Input.wl.end', default=None, valid_range=[0, 1000000.0], optional=True),),
            tokens = [option_definition.addToken(name='Input.wl.start', datatype=float, default='NOT_DEFINED_FLOAT', valid_range=[0,1e6]),
                option_definition.addToken(name='Input.wl.end', datatype=float, default='NOT_DEFINED_FLOAT', valid_range=[0,1e6], optional=True)],
            # TODO: argument 2 should be bigger than argument 1
            parents=['uvspec'],
            non_parents=['wavelength_index'],
        )
        # NOTE(review): unlike every other option here, `documentation` is the bare
        # list ['wavelength_step'] instead of documentation['wavelength_step'];
        # there is no 'wavelength_step' key in get_spectral_documentation(), the
        # gui_inputs/tokens are empty, and this option is never added to
        # self.options below -- confirm whether it is intentionally disabled.
        wavelength_step = option_definition.option(
            name='wavelength_step',
            group='spectral',
            helpstr='Set the wavelength step (in nm) in conjunction with the wavelength range.',
            documentation=['wavelength_step'],
            gui_inputs=(),
            tokens=[],
            parents=['uvspec'],
            non_parents=['wavelength_index'],
        )
        # Select wavelengths by index into a predefined grid (mutually
        # exclusive with the explicit wavelength range above).
        wavelength_index = option_definition.option(
            name='wavelength_index',
            group='spectral',
            helpstr='Set the wavelengths to be selected.',
            documentation=documentation['wavelength_index'],
            gui_inputs=(GUI_definition.IntegerInput(name='Input.wl.start_index'),
                GUI_definition.IntegerInput(name='Input.wl.end_index',
                    optional=True),),
            tokens = [option_definition.addToken(name='Input.wl.start_index', datatype=int, default='NOT_DEFINED_INTEGER',),
                option_definition.addToken(name='Input.wl.end_index', datatype=int, default='NOT_DEFINED_INTEGER', optional=True)],
            parents=['uvspec'],
            non_parents=['wavelength'],
        )
        wavelength_grid_file = option_definition.option(
            name='wavelength_grid_file',
            group='spectral',
            helpstr='Location of file with wavelength grid used for the internal transmittance calculations.',
            documentation=documentation['wavelength_grid_file'],
            gui_inputs=(GUI_definition.FileInput(name='Input.filename[FN_WLTRANS]'),),
            tokens=option_definition.addToken(name='Input.filename[FN_WLTRANS]', datatype=io.IOBase),
            parents=['uvspec'],
            non_parents=['thermal_bands_file'],
        )
        thermal_bands_file = option_definition.option(
            name='thermal_bands_file',
            group='spectral',
            helpstr='File with the center wavelengths and the wavelength band intervals for calculations in the thermal range.',
            documentation=documentation['thermal_bands_file'],
            gui_inputs=(GUI_definition.FileInput(name='Input.filename[FN_WLBANDS]'),),
            tokens=option_definition.addToken(name='Input.filename[FN_WLBANDS]', datatype=io.IOBase),
            parents=['uvspec'],
            non_parents=['filter_function_file','slit_function_file'],
        )
        thermal_bandwidth = option_definition.option(
            name='thermal_bandwidth',
            group='spectral',
            documentation=documentation['thermal_bandwidth'],
            gui_inputs=(GUI_definition.FloatInput(name='Input.bandwidth', valid_range=[0, 1000000.0]), GUI_definition.ListInput(name='Input.bandwidth_unit', valid_range=['','nm', 'cm-1'], optional=True),),
            tokens = [option_definition.addToken(name='Input.bandwidth', datatype=float, valid_range=[0,1e6]),
                option_definition.addLogical(name='Input.bandwidth_unit', logicals=['nm','cm-1'], setting='UNIT_PER_', optional=True)],
            parents=['uvspec'],
        )
        # Radiation source (solar or thermal) plus optional extraterrestrial
        # spectrum file and spectrum unit; also carries plotting metadata.
        source = option_definition.option(
            name='source',
            group='spectral',
            helpstr='Source of radiation.',
            documentation=documentation['source'],
            gui_inputs=(GUI_definition.ListInput(name='Input.source', valid_range=['thermal', 'solar']),
                GUI_definition.FileInput(name='Input.filename[FN_EXTRATERRESTRIAL]', optional=True),
                GUI_definition.ListInput(name='Input.spectrum_unit', valid_range=['','per_nm', 'per_cm-1', 'per_band'], optional=True),),
            tokens= [option_definition.addLogical(name='Input.source', logicals=['thermal', 'solar'], setting='SRC_'),
                option_definition.addToken(name='Input.filename[FN_EXTRATERRESTRIAL]', datatype=io.IOBase, optional=True),
                option_definition.addLogical(name='Input.spectrum_unit', logicals=['per_nm', 'per_cm-1', 'per_band'], setting='UNIT_', optional=True)],
            parents=['uvspec'],
            plot = {'plot_type': '2D',
                'optional_args': {'column_names': (
                    "wavelength",
                    "extraterrestrial flux",)
                }
            }
        )
        mc_sun_angular_size = option_definition.option(
            name='mc_sun_angular_size',
            group='spectral',
            documentation=documentation['mc_sun_angular_size'],
            tokens=option_definition.addToken(name='Input.rte.mc.sun_radius',datatype=float),
            threedmystic=True,
            showInGui=False,
        )
        # MYSTIC lidar simulation: not exposed in the GUI, conflicts with `source`.
        mc_lidar = option_definition.option(
            name='mc_lidar',
            group='spectral',
            helpstr='Use local estimator to simulate a lidar.',
            documentation=documentation['mc_lidar'],
            tokens=[option_definition.addSetting(name='Input.source', setting='SRC_LIDAR'),
                option_definition.addSetting(name='Input.rte.mc.escape', setting=0),
                option_definition.addSetting(name='Input.rte.mc.locest', setting='MCLIDAR_SPACE'),
                option_definition.addLogical(name='Input.rte.mc.locest', logicals=['ascope','polarize','space','falcon','simple','pilot','moon','test'], setting='MCLIDAR_', optional=True)],
            parents=['uvspec'],
            non_parents=['source'],
            showInGui=False,
            islidar=True
        )
        mc_lidar_file = option_definition.option(
            name='mc_lidar_file',
            group='spectral',
            helpstr='File containing positions, looking directions, and opening angles of lasers and detectors for lidar simulations in MYSTIC.',
            documentation=documentation['mc_lidar_file'],
            tokens=option_definition.addToken(name='Input.rte.mc.filename[FN_MC_LIDAR]',datatype=io.IOBase),
            parents=['uvspec'],
            non_parents=['source'],
            showInGui=False,
            islidar=True
        )
        mc_radar = option_definition.option(
            name='mc_radar',
            group='spectral',
            helpstr='Switch on radar.',
            documentation=documentation['mc_radar'],
            tokens=[option_definition.addSetting(name='Input.source', setting='SRC_LIDAR'),
                option_definition.addSetting(name='Input.rte.mc.escape', setting=0),
                option_definition.addSetting(name='Input.rte.mc.locest', setting='MCRADAR')],
            parents=['uvspec'],
            non_parents=['source'],
            showInGui=False,
            islidar=True
        )
        # Active options exposed by this group (wavelength_step is excluded; see NOTE above).
        self.options = [wavelength, wavelength_index, wavelength_grid_file,
                thermal_bands_file, thermal_bandwidth,
                source,
                mc_lidar, mc_lidar_file, mc_radar,
                mc_sun_angular_size, ]
    def __iter__(self):
        """Iterate over the active option objects."""
        return iter(self.options)
def get_documentation():
    """Return the spectral option documentation mapping (thin alias)."""
    docs = get_spectral_documentation()
    return docs
def get_spectral_documentation():
    """Return the LaTeX help text for each spectral option, keyed by option name."""
    return {
        'mc_sun_angular_size' : r'''
        At the moment only useful together with \code{mc_panorama_view}.
        Set the angular radius of the sun in degrees. If omitted the radius is calculated
        (more or less correctly) via the earth-sun distance (not well tested).
        If no \code{mc_backward_sun_shape_file} is given a spectral sun shape according to
        Koepke2001 is used.
        ''',
        'mc_lidar' : r'''
        Use local estimator to simulate a lidar. If \code{mc_lidar} is set,
        you need to provide a lidar file for \code{mc_lidar_file}. A detailed
        documentation is available on request from Robert Buras.
        ''',
        'mc_lidar_file' : r'''
        File containing positions, looking directions, and opening angles of
        lasers and detectors for lidar simulations in MYSTIC. Only meaningful
        with \code{mc_lidar}.
        \fcode{
        mc_lidar_file file
        }
        ''',
        'mc_radar' : r'''
        Switch on radar, works exactly like lidar, so use with \code{mc_lidar_file}.
        Use \code{mc_ris} to get good statistics. Has not been tested with \code{mc_vroom},
        it's recommended to switch that off. Use with \code{write_output_as_netcdf} to get
        additional reflectivity factor output in a mmclx-file (netcdf format).
        ''',
        'source' : r'''
        Set the radiation source type
        \fcode{
        source type
        }
        where \code{type} is either \code{solar} or \code{thermal}.
        Solar radiation is per default output in W/(m2 nm) if no \code{mol\_abs\_param} is specified
        or \code{mol\_abs\_param} options \code{crs}, \code{lowtran}, or \code{reptran} are specified.
        For all other \code{mol\_abs\_param} options
        the output is integrated over the wavelength band.
        Thermal radiation is per default output in W/(m2 cm-1), if REPTRAN is used or the bandwidth
        is equal to 1 cm-1 (default for \code{mol\_abs\_param lowtran}).
        Otherwise the output is the integrated flux over the
        wavenumber interval specified by \code{thermal\_bandwidth}, \code{thermal\_bands\_file},
        or by the \code{mol\_abs\_param} option (\code{kato}, \code{kato2}, \code{kato2.96},
        \code{fu}, or \code{avhrr\_kratz}).
        \fcode{
        source type [file] [unit]
        }
        The second argument \code{file} specifies the location of file holding the extraterrestrial spectrum.
        In general, \code{file} is required for solar calculations if \code{mol\_abs\_param} is not used.
        \code{file} is ignored if \code{mol\_abs\_param} other than \code{lowtran} oder \code{reptran} is specified.
        The file must contain two columns. Column 1 is the wavelength in nm, and column 2
        the corresponding extraterrestrial flux. The user may freely use any units
        he/she wants for the extraterrestrial flux. The wavelength specified grid
        defines the wavelength resolution at which results are returned. However,
        the wavelength range is determined by \code{wavelength}. \code{file} may be
        omitted for thermal radiation calculations (\code{source thermal}) as well as
        \code{output_quantity transmittance} and \code{output_quantity reflectivity} calculations. If omitted, the
        output resolution equals the internal wavelength grid which the model chooses
        for the radiative transfer calculation.
        Comments start with \code{\#}. Empty lines are ignored.
        For some purposes it is useful to tell libRadtran the units of the spectrum.
        This can be done with the optional third argument.
        Possible choises for \code{unit} are \code{per\_nm}, \code{per\_cm-1} or \code{per\_band}.
        If \code{unit} is set to \code{per\_nm}
        libRadtran assumes that the unit of the spectrum is W/(m2 nm), if set to \code{per\_cm-1}
        it assumes W/(m2 cm-1).
        ''',
        'thermal_bandwidth' : r'''
        Specify a constant bandwidth in cm-1 for thermal calculations.
        \fcode{
        thermal\_bandwidth value
        }
        The default is 1 cm-1.
        This option is ignored if used together with \code{mol\_abs\_param kato/kato2/kato2.96/fu/avhrr\_kratz}.
        ''',
        'thermal_bands_file' : r'''
        File with the center wavelengths and the wavelength band intervals to be used for
        calculations in the thermal range.
        \fcode{
        thermal\_bands\_file file
        }
        The following three columns are expected:
        center (or reference) wavelength, lower wavelength limit, upper wavelength limit [nm].
        \code{thermal\_bands\_file} defines the wavelength grid for the radiative transfer
        calculation. The RTE solver is called for each of the wavelengths in the first column.
        The atmospheric (scattering, absorption, etc) properties are also evaluated at these
        wavelengths. For thermal radiation calculations, the Planck function is integrated
        over the wavelength bands defined in the second and third columns. The result will
        therefore be a band-integrated irradiance which does only make sense when the
        \code{source solar file} grid equals the \code{thermal\_bands\_file} grid.
        ''',
        'wavelength_grid_file' : r'''
        Location of single column file that sets the wavelength grid used for the
        internal transmittance calculations.
        \fcode{
        wavelength\_grid\_file file
        }
        The wavelengths must be in nm.
        Do not use this option unless you know what you are doing.
        Comments start with \code{\#}. Empty lines are ignored.
        ''',
        'wavelength' : r'''
        Set the wavelength range by specifying first and last wavelength in nm.
        \fcode{
        wavelength lambda\_0 lambda\_1
        }
        The default output wavelength grid is that defined in \code{source solar file},
        unless \code{spline} is specified. Note that the radiative transfer calculations
        are done on an internal grid which can be influenced with \code{wavelength\_grid\_file}
        or \code{mol\_tau\_file abs file}
        ''',
        'wavelength_index' : r'''
        Set the wavelengths to be selected. To be used together with predefined wavelength grids,
        such as \code{wavelength\_grid\_file}, \code{mol\_tau\_file abs file} and particularly
        useful in combination with the \code{mol\_abs\_param} option where often only a
        specified number of wavelength bands is required. E.g., in combination with
        \code{mol\_abs\_param AVHRR\_KRATZ}, \code{wavelength\_index 15 15} will select wavelength
        index 15 which corresponds to channel 4, or \code{wavelength\_index 10 14} will select
        those bands required for channel 3. Indices start from 1.
        ''',
    }
| 49.115152 | 205 | 0.64752 |
6470ce9a91cea989e98aee17418d426c6078bf20 | 3,704 | py | Python | lib/galaxy/web/framework/middleware/sentry.py | BalthazarPavot/galaxy_project_reports | aa397de11a9a3425f85c701087af4c5d165b571f | [
"CC-BY-3.0"
] | 1 | 2019-07-03T08:13:57.000Z | 2019-07-03T08:13:57.000Z | lib/galaxy/web/framework/middleware/sentry.py | LainOldAccound/galaxy_project_reports | aa397de11a9a3425f85c701087af4c5d165b571f | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/web/framework/middleware/sentry.py | LainOldAccound/galaxy_project_reports | aa397de11a9a3425f85c701087af4c5d165b571f | [
"CC-BY-3.0"
] | null | null | null | """
raven.middleware
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
    from raven import Client
    from raven.utils.wsgi import get_current_url, get_headers, get_environ
except ImportError:
    # Fix: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    # raven is optional; Sentry.__init__ raises a clear message when it is missing.
    Client = None

RAVEN_IMPORT_MESSAGE = ('The Python raven package is required to use this '
                        'feature, please install it')
class Sentry(object):
    """
    A WSGI middleware which will attempt to capture any
    uncaught exceptions and send them to Sentry.
    """
    def __init__(self, application, dsn):
        """Wrap `application`; `dsn` is the Sentry connection string passed to raven.

        Raises AssertionError when the optional raven package is not installed.
        """
        assert Client is not None, RAVEN_IMPORT_MESSAGE
        self.application = application
        self.client = Client( dsn )
    def __call__(self, environ, start_response):
        """WSGI entry point: report exceptions to Sentry, then re-raise them.

        Implemented as a generator so exceptions raised while the response body
        is being iterated (not just while it is being created) are also captured.
        """
        try:
            iterable = self.application(environ, start_response)
        except Exception:
            # Report, then re-raise so the server still sees the failure.
            self.handle_exception(environ)
            raise
        try:
            for event in iterable:
                yield event
        except Exception:
            self.handle_exception(environ)
            raise
        finally:
            # wsgi spec requires iterable to call close if it exists
            # see http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
            if iterable and hasattr(iterable, 'close') and callable(iterable.close):
                try:
                    iterable.close()
                except Exception:
                    # Close failures are reported but deliberately not re-raised.
                    self.handle_exception(environ)
    def handle_exception(self, environ):
        """Send the current exception to Sentry with redacted request metadata.

        Stores the resulting Sentry event id in `environ['sentry_event_id']`
        so downstream code can show it to the user, and returns the raw
        event id value from raven's captureException.
        """
        headers = dict(get_headers(environ))
        # Authorization header for REMOTE_USER sites consists of a base64() of
        # their plaintext password. It is a security issue for this password to
        # be exposed to a third party system which may or may not be under
        # control of the same administrators as the local Authentication
        # system. E.g. university LDAP systems.
        if 'Authorization' in headers:
            # Redact so the administrator knows that a value is indeed present.
            headers['Authorization'] = 'redacted'
        # Passing cookies allows for impersonation of users (depending on
        # remote service) and can be considered a security risk as well. For
        # multiple services running alongside Galaxy on the same host, this
        # could allow a sentry user with access to logs to impersonate a user
        # on another service. In the case of services like IPython, this can be
        # a serious concern as that would allow for terminal access. Furthermore,
        # very little debugging information can be gained as a result of having
        # access to all of the users cookies (including Galaxy cookies)
        if 'Cookie' in headers:
            headers['Cookie'] = 'redacted'
        event_id = self.client.captureException(
            data={
                'sentry.interfaces.Http': {
                    'method': environ.get('REQUEST_METHOD'),
                    'url': get_current_url(environ, strip_querystring=True),
                    'query_string': environ.get('QUERY_STRING'),
                    # TODO
                    # 'data': environ.get('wsgi.input'),
                    'headers': headers,
                    'env': dict(get_environ(environ)),
                }
            },
            # Galaxy: add request id from environment if available
            extra={
                'request_id': environ.get( 'request_id', 'Unknown' )
            }
        )
        # Galaxy: store event_id in environment so we can show it to the user
        environ['sentry_event_id'] = event_id[0]
        return event_id
| 39.827957 | 88 | 0.605832 |
8ee2a20cf695ae6ec59ff34a8bf9356262815cff | 6,028 | py | Python | examples/django_spine/django_spine/settings.py | ikeikeikeike/django-spine | b8bf097d746b552c61a013436b0dceefa7b9207f | [
"MIT"
] | 1 | 2015-11-05T05:07:50.000Z | 2015-11-05T05:07:50.000Z | examples/django_spine/django_spine/settings.py | ikeikeikeike/django-spine | b8bf097d746b552c61a013436b0dceefa7b9207f | [
"MIT"
] | null | null | null | examples/django_spine/django_spine/settings.py | ikeikeikeike/django-spine | b8bf097d746b552c61a013436b0dceefa7b9207f | [
"MIT"
] | null | null | null | # Django settings for django_spine project.
# NOTE(review): DEBUG is enabled in a committed settings file -- presumably fine
# for this example project, but confirm it is never deployed as-is.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'test.db',                      # Or path to database file if using sqlite3.
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Tokyo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ja'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is hard-coded in version control; for anything
# beyond an example project it should come from the environment.
SECRET_KEY = 'uk(@38rr2*cv8^k!_&hy-v@5=d*@+lw^an*@=*v(o)1-u&gsb7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # NOTE(review): CSRF protection is disabled below -- confirm intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'pipeline.middleware.MinifyHTMLMiddleware',
)
ROOT_URLCONF = 'django_spine.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django_spine.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'subcommand',
    'spine',
    'pipeline',
    'spineapp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# django-pipeline bundle: all Spine/CoffeeScript sources are concatenated and
# compiled into a single versioned application.js.
PIPELINE_JS = {
    'application': {
        'source_filenames': (
            'js/*.js',
            'js/spine/*.js',
            'js/spine/**/*.js',
            'js/spineapp/lib/*.coffee',
            'js/spineapp/*.coffee',
            'js/spineapp/models/*.coffee',
            'js/spineapp/controllers/*.coffee',
            'js/spineapp/views/**/*.eco',
        ),
        'output_filename': 'js/application.r?.js'
    }
}
PIPELINE_COMPILERS = (
    'pipeline.compilers.coffee.CoffeeScriptCompiler',
    'spine.compiler.EcoCompiler',
)
| 32.76087 | 109 | 0.671865 |
30777cb550eeb1f619e226de26b5448e7404d6e2 | 561 | py | Python | 0x0A-python-inheritance/11-square.py | BennettDixon/holbertonschool-higher_level_programming | 3fbcd5e009548aab5539ce8610b4113f005964c4 | [
"MIT"
] | 1 | 2022-02-07T12:13:18.000Z | 2022-02-07T12:13:18.000Z | 0x0A-python-inheritance/11-square.py | BennettDixon/holbertonschool-higher_level_programming | 3fbcd5e009548aab5539ce8610b4113f005964c4 | [
"MIT"
] | null | null | null | 0x0A-python-inheritance/11-square.py | BennettDixon/holbertonschool-higher_level_programming | 3fbcd5e009548aab5539ce8610b4113f005964c4 | [
"MIT"
] | 1 | 2021-12-06T18:15:54.000Z | 2021-12-06T18:15:54.000Z | #!/usr/bin/python3
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
    """Square shape class; superclass chain is BaseGeometry -> Rectangle."""

    def __init__(self, size):
        """Instantiate a square with side length ``size``.

        Args:
            size: length of each side; validated as a positive integer.

        Raises:
            TypeError/ValueError: propagated from integer_validator.
        """
        # Fix: validate before delegating, so an invalid size never reaches
        # Rectangle.__init__ (the original validated only after the super call).
        self.integer_validator("size", size)
        super().__init__(size, size)
        self.__size = size

    def __str__(self):
        """Return the square description: '[Square] <size>/<size>'."""
        return "[Square] {0}/{0}".format(self.__size)
a69730dfa3be43e24616dcf81a893059818e50d3 | 1,285 | py | Python | threatminer/komand_threatminer/actions/ssdeep_sample/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | threatminer/komand_threatminer/actions/ssdeep_sample/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | threatminer/komand_threatminer/actions/ssdeep_sample/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
from .schema import SsdeepSampleInput, SsdeepSampleOutput
# Custom imports below
import json
import requests
class SsdeepSample(komand.Action):
    """Komand action: fetch ThreatMiner data related to an ssdeep fuzzy hash."""

    # rt=1 selects the 'samples' result type of the ssdeep endpoint.
    API_URL = 'https://www.threatminer.org/ssdeep.php?api=True&rt=1'

    def __init__(self):
        # Fix: super(self.__class__, ...) recurses infinitely if this class is
        # ever subclassed; name the class explicitly.
        super(SsdeepSample, self).__init__(
            name='ssdeep_sample',
            description='Fetches information related to a fuzzy hash',
            input=SsdeepSampleInput(),
            output=SsdeepSampleOutput())

    def run(self, params={}):
        """Query the API for the given fuzzy hash and return the JSON payload.

        Returns None (after logging) when the API responds with an HTTP error.
        """
        query = params.get('query')
        try:
            response = requests.get(self.API_URL, params={"q": query})
            # Fix: requests.get never raises HTTPError by itself, so the
            # handler below was unreachable; surface 4xx/5xx explicitly.
            response.raise_for_status()
            return {'response': response.json()}
        except requests.exceptions.HTTPError as e:
            # Fix: HTTPError carries the status on e.response, not e.status_code.
            self.logger.error('Requests: HTTPError: status code %s for %s',
                              str(e.response.status_code), params.get('query'))

    def test(self):
        """Connectivity check with a known-good fuzzy hash."""
        params = {
            "q": "1536:TJsNrChuG2K6IVOTjWko8a9P6W3OEHBQc4w4:TJs0oG2KSTj3o8a9PFeEHn4l"
        }
        response = requests.get(self.API_URL, params=params)
        if response.status_code != 200:
            raise Exception('%s (HTTP status: %s)' % (response.text, response.status_code))
        return {'status_code': response.status_code}
| 32.948718 | 91 | 0.620233 |
12c7ed34130c6d3ccc08188217a5a9b258ef3d9d | 7,504 | py | Python | chipy_org/settings.py | ahlusar1989/chipy.org | 01a1321db4731a9dabeea22d8462de97a9ab0880 | [
"MIT"
] | null | null | null | chipy_org/settings.py | ahlusar1989/chipy.org | 01a1321db4731a9dabeea22d8462de97a9ab0880 | [
"MIT"
] | null | null | null | chipy_org/settings.py | ahlusar1989/chipy.org | 01a1321db4731a9dabeea22d8462de97a9ab0880 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Django settings for account project
import os
import sys
import dj_database_url
from django.conf.global_settings import MIDDLEWARE_CLASSES
def env_var(key, default=None):
    """Fetch an environment variable, mapping the strings 'True'/'False' to booleans."""
    raw = os.environ.get(key, default)
    if raw == 'True':
        return True
    if raw == 'False':
        return False
    return raw
def env_list(key, defaults=None, delimiter=','):
    """Read a delimiter-separated environment variable as a list.

    Arguments:
        key -- environment variable name
        defaults -- list returned when the variable is unset or empty;
            defaults to a fresh empty list (the original signature used a
            shared mutable default, a classic Python pitfall)
        delimiter -- separator used to split the raw value

    Returns:
        The split value, or *defaults* when the variable is missing/empty.
    """
    if defaults is None:
        defaults = []
    val = os.environ.get(key)
    if val:
        return val.split(delimiter)
    return defaults
# NOTE: this settings module executes at import time; it only defines
# constants and reads configuration from the process environment.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(PROJECT_ROOT, 'apps'))
DEBUG = env_var('DEBUG', False)
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['chipy.org', 'www.chipy.org', 'chipy.herokuapp.com', 'chipy-149.herokuapp.com']
if DEBUG:
    ALLOWED_HOSTS.append('localhost:8000')
GITHUB_APP_ID = env_var('GITHUB_APP_ID')
GITHUB_API_SECRET = env_var('GITHUB_API_SECRET')
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = env_var('SERVE_MEDIA', DEBUG)
INTERNAL_IPS = [
    "127.0.0.1",
]
# (name, email) pairs derived from a comma-separated ADMINS env var; the
# name is the local part before '@'. env_var('ADMINS') must be set or the
# .split call raises at import time.
ADMINS = [(admin.split('@')[0], admin) for admin in env_var('ADMINS').split(',')]
MANAGERS = ADMINS
# NOTE(review): this re-assignment silently discards the ALLOWED_HOSTS list
# built above (including the Heroku hosts and the DEBUG-only localhost
# entry) unless those values are repeated in the ALLOWED_HOSTS env var —
# confirm whether the earlier block is dead code.
ALLOWED_HOSTS = env_list("ALLOWED_HOSTS", ['www.chipy.org', 'chipy.org'])
# dj_database_url will pull from the DATABASE_URL environment variable
DATABASES = {'default': dj_database_url.config(default='postgres://localhost:5432/chipy_org')}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Central"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = '/'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = env_var('SITE_ID', 1)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Additional directories which hold static files
STATICFILES_DIRS = [
    os.path.join(PROJECT_ROOT, "static"),
]
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATIC_URL = '/static/'
MEDIA_URL = STATIC_URL + 'media/'
STATIC_ROOT = "static"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = os.path.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = env_var('SECRET_KEY')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
    "django.template.loaders.filesystem.Loader",
    "django.template.loaders.app_directories.Loader",
]
ROOT_URLCONF = "chipy_org.urls"
TEMPLATE_DIRS = [
    os.path.join(PROJECT_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    "social_auth.context_processors.social_auth_login_redirect",
]
# Social Auth settings
# Extends Django's default stack (MIDDLEWARE_CLASSES is imported from
# django.conf.global_settings at the top of this file).
MIDDLEWARE_CLASSES += ('chipy_org.libs.middleware.ChipySocialAuthExceptionMiddleware',)
LOGIN_ERROR_URL = '/'
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.twitter.TwitterBackend',
    'social_auth.backends.facebook.FacebookBackend',
    'social_auth.backends.google.GoogleOAuth2Backend',
    'social_auth.backends.browserid.BrowserIDBackend',
    'social_auth.backends.contrib.linkedin.LinkedinBackend',
    'social_auth.backends.contrib.github.GithubBackend',
    'social_auth.backends.OpenIDBackend',
    'django.contrib.auth.backends.ModelBackend',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
SOCIAL_AUTH_ENABLED_BACKENDS = (
    'google',
    'github',
)
SOCIAL_AUTH_PIPELINE = (
    'social_auth.backends.pipeline.social.social_auth_user',
    'chipy_org.libs.social_auth_pipelines.associate_by_email',
    'social_auth.backends.pipeline.user.get_username',
    'social_auth.backends.pipeline.user.create_user',
    'social_auth.backends.pipeline.social.associate_user',
    'social_auth.backends.pipeline.social.load_extra_data',
    'social_auth.backends.pipeline.user.update_user_details'
)
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email', 'first_name', 'last_name']
GITHUB_EXTRA_DATA = [
    ('email', 'email'),
]
INSTALLED_APPS = [
    # Admin Tools
    'admin_tools',
    'admin_tools.theming',
    'admin_tools.menu',
    'admin_tools.dashboard',
    # Django
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.flatpages',
    'django.contrib.humanize',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    # Third party
    'nocaptcha_recaptcha',
    'django_ical',
    'envelope',
    'flatblocks',
    'flatpages_tinymce',
    'django_gravatar',
    'gunicorn',
    'honeypot',
    'interval',
    'rest_framework',
    'social_auth',
    'south',
    'storages',
    'tinymce',
    # theme
    'django_forms_bootstrap',
    # project
    'about',
    'contact',
    'meetings',
    'profiles',
]
if DEBUG:
    # Add the command extensions
    INSTALLED_APPS += ['django_extensions']
FIXTURE_DIRS = [
    os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
# ENVELOPE_EMAIL_RECIPIENTS is required: env_var() returns None when the
# variable is unset and .split would then raise at import time.
ENVELOPE_EMAIL_RECIPIENTS = env_var('ENVELOPE_EMAIL_RECIPIENTS').split(',')
EMAIL_BACKEND = env_var('EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = env_var('EMAIL_HOST', 'smtp.sendgrid.net')
# Fall back to the SendGrid add-on's credentials when explicit ones are absent.
EMAIL_HOST_USER = env_var('EMAIL_HOST_USER', env_var('SENDGRID_USERNAME', None))
EMAIL_HOST_PASSWORD = env_var('EMAIL_HOST_PASSWORD', env_var('SENDGRID_PASSWORD', None))
EMAIL_PORT = int(env_var('EMAIL_PORT', 587))
EMAIL_USE_TLS = env_var('EMAIL_USE_TLS', True)
DEFAULT_FROM_EMAIL = env_var('DEFAULT_FROM_EMAIL', 'DoNotReply@chipy.org')
HONEYPOT_FIELD_NAME = 'email2'
if env_var('PRODUCTION', False):
    PREPEND_WWW = True
TINYMCE_DEFAULT_CONFIG = {
    'height': "500",
    # custom plugins
    'plugins': "table,spellchecker,paste,searchreplace,inlinepopups",
    # editor theme
    'theme': "advanced",
    # custom CSS file for styling editor area
    'content_css': MEDIA_URL + "css/custom_tinymce.css",
    # use absolute urls when inserting links/images
    'relative_urls': False,
}
NORECAPTCHA_SITE_KEY = env_var('NORECAPTCHA_SITE_KEY')
NORECAPTCHA_SECRET_KEY = env_var('NORECAPTCHA_SECRET_KEY')
FLATPAGES_TINYMCE_ADMIN = True
MEETUP_API_KEY = env_var('MEETUP_API_KEY')
GOOGLE_OAUTH2_CLIENT_ID = env_var('GOOGLE_OAUTH2_CLIENT_ID')
GOOGLE_OAUTH2_CLIENT_SECRET = env_var('GOOGLE_OAUTH2_CLIENT_SECRET')
# LOGGING = {
#     'version': 1,
#     'handlers': {
#         'console': {
#             'class': 'logging.StreamHandler',
#             'stream': sys.stdout,
#         }
#     },
#     'root': {
#         'handlers': ['console'],
#         'level': 'INFO'
#     }
# }
cc89bca44ee939607208a81f60285960b3e944cd | 1,910 | py | Python | tests/models/test_3d_secure_status.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | null | null | null | tests/models/test_3d_secure_status.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | 12 | 2019-03-06T14:27:38.000Z | 2019-03-12T21:45:59.000Z | tests/models/test_3d_secure_status.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | 1 | 2021-01-06T12:09:42.000Z | 2021-01-06T12:09:42.000Z | import mock
from sagepaypi.exceptions import InvalidTransactionStatus
from sagepaypi.models import Transaction
from tests.mocks import gone_response, auth_success_response, outcome_live_response
from tests.test_case import AppTestCase
class TestTransactionOutcome(AppTestCase):
    """Tests for Transaction.get_3d_secure_status (the 3-D Secure follow-up call)."""

    # Provides the Transaction with pk ec87ac03-... used by every test below.
    fixtures = ['tests/fixtures/test']

    def test_error__no_transaction_id(self):
        # A transaction never submitted to the gateway has no transaction_id,
        # so asking for its 3-D Secure status must fail fast.
        transaction = Transaction.objects.get(pk='ec87ac03-7c34-472c-823b-1950da3568e6')
        with self.assertRaises(InvalidTransactionStatus) as e:
            transaction.get_3d_secure_status('pares-data')
        self.assertEqual(
            e.exception.args[0],
            'transaction is missing a transaction_id'
        )

    @mock.patch('sagepaypi.gateway.default_gateway')
    def test_error__500_response(self, mock_gateway):
        # Gateway failure (mocked "gone" response) must leave secure_status unset.
        mock_gateway.get_3d_secure_status.return_value = gone_response()
        mock_gateway.get_transaction_outcome.return_value = gone_response()
        transaction = Transaction.objects.get(pk='ec87ac03-7c34-472c-823b-1950da3568e6')
        transaction.transaction_id = 'dummy-transaction-id'
        transaction.get_3d_secure_status('pares-data')
        # not expected
        self.assertIsNone(transaction.secure_status)

    @mock.patch('sagepaypi.gateway.default_gateway')
    def test_outcome__success(self, mock_gateway):
        # A successful auth response must store the PaRes and the gateway status.
        mock_gateway.get_3d_secure_status.return_value = auth_success_response()
        mock_gateway.get_transaction_outcome.return_value = outcome_live_response()
        transaction = Transaction.objects.get(pk='ec87ac03-7c34-472c-823b-1950da3568e6')
        transaction.transaction_id = 'dummy-transaction-id'
        transaction.get_3d_secure_status('pares-data')
        json = auth_success_response().json()
        # expected
        self.assertEqual(transaction.pares, 'pares-data')
        self.assertEqual(transaction.secure_status, json['status'])
ccbb431389e7d717d7e2ff4200a585a5a03bea10 | 19,809 | py | Python | week 1/gradient checking/Gradient+Checking+v1.py | sazzadBuet08/deep-learning-ai-improve-deep-networks-by-andrew-ng | 5e821ee8cf1a902ccedddd429645c60011e8b3b8 | [
"MIT"
] | null | null | null | week 1/gradient checking/Gradient+Checking+v1.py | sazzadBuet08/deep-learning-ai-improve-deep-networks-by-andrew-ng | 5e821ee8cf1a902ccedddd429645c60011e8b3b8 | [
"MIT"
] | null | null | null | week 1/gradient checking/Gradient+Checking+v1.py | sazzadBuet08/deep-learning-ai-improve-deep-networks-by-andrew-ng | 5e821ee8cf1a902ccedddd429645c60011e8b3b8 | [
"MIT"
] | null | null | null |
# coding: utf-8
# # Gradient Checking
#
# Welcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking.
#
# You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker.
#
# But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".
#
# Let's do it!
# In[1]:
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
# ## 1) How does gradient checking work?
#
# Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.
#
# Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$.
#
# Let's look back at the definition of a derivative (or gradient):
# $$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
#
# If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."
#
# We know the following:
#
# - $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly.
# - You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct.
#
# Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct!
# ## 2) 1-dimensional gradient checking
#
# Consider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.
#
# You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct.
#
# <img src="images/1Dgrad_kiank.png" style="width:600px;height:250px;">
# <caption><center> <u> **Figure 1** </u>: **1D linear model**<br> </center></caption>
#
# The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation").
#
# **Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
# In[4]:
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
    """Forward pass of the 1-D linear model J(theta) = theta * x.

    Arguments:
        x -- a real-valued input
        theta -- the model's single real-valued parameter

    Returns:
        J -- the cost evaluated at (x, theta)
    """
    return theta * x
# In[5]:
# Sanity check: J(x=2, theta=4) should print 8.
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
# **Expected Output**:
#
# <table style=>
# <tr>
# <td> ** J ** </td>
# <td> 8</td>
# </tr>
# </table>
# **Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
# In[6]:
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
    """Backward pass of the 1-D linear model.

    Because J(theta) = theta * x, the analytic derivative dJ/dtheta is
    simply x.

    Arguments:
        x -- a real-valued input
        theta -- the model's single real-valued parameter

    Returns:
        dtheta -- the gradient of the cost with respect to theta
    """
    return x
# In[7]:
# Sanity check: dJ/dtheta at x=2 should print 2.
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
# **Expected Output**:
#
# <table>
# <tr>
# <td> ** dtheta ** </td>
# <td> 2 </td>
# </tr>
# </table>
# **Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.
#
# **Instructions**:
# - First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow:
# 1. $\theta^{+} = \theta + \varepsilon$
# 2. $\theta^{-} = \theta - \varepsilon$
# 3. $J^{+} = J(\theta^{+})$
# 4. $J^{-} = J(\theta^{-})$
# 5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$
# - Then compute the gradient using backward propagation, and store the result in a variable "grad"
# - Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:
# $$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$
# You will need 3 Steps to compute this formula:
# - 1'. compute the numerator using np.linalg.norm(...)
# - 2'. compute the denominator. You will need to call np.linalg.norm(...) twice.
# - 3'. divide them.
# - If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
#
# In[12]:
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
    """Compare the analytic gradient of J(theta) = theta * x against a
    centered finite-difference approximation.

    Arguments:
        x -- a real-valued input
        theta -- the model's single real-valued parameter
        epsilon -- step size for the two-sided difference quotient

    Returns:
        difference -- relative distance between the analytic gradient and
        the numerical approximation (formula (2) of the notebook)
    """
    # Two-sided difference quotient: (J(theta+eps) - J(theta-eps)) / (2*eps).
    cost_up = (theta + epsilon) * x
    cost_down = (theta - epsilon) * x
    gradapprox = (cost_up - cost_down) / (2 * epsilon)
    # Analytic gradient of theta * x with respect to theta is x.
    grad = x
    # Relative difference ||grad - approx|| / (||grad|| + ||approx||).
    difference = np.linalg.norm(grad - gradapprox) / (
        np.linalg.norm(grad) + np.linalg.norm(gradapprox))
    if difference < 1e-7:
        print ("The gradient is correct!")
    else:
        print ("The gradient is wrong!")
    return difference
# In[13]:
# Sanity check: analytic and numerical gradients should agree closely.
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
# **Expected Output**:
# The gradient is correct!
# <table>
# <tr>
# <td> ** difference ** </td>
# <td> 2.9193358103083e-10 </td>
# </tr>
# </table>
# Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`.
#
# Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!
# ## 3) N-dimensional gradient checking
# The following figure describes the forward and backward propagation of your fraud detection model.
#
# <img src="images/NDgrad_kiank.png" style="width:600px;height:400px;">
# <caption><center> <u> **Figure 2** </u>: **deep neural network**<br>*LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*</center></caption>
#
# Let's look at your implementations for forward propagation and backward propagation.
# In[14]:
def forward_propagation_n(X, Y, parameters):
    """Forward pass (and cost) of the 3-layer network in Figure 3.

    Architecture: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Arguments:
        X -- training set for m examples
        Y -- labels for m examples
        parameters -- dict with keys "W1", "b1", "W2", "b2", "W3", "b3"
            of shapes (5, 4), (5, 1), (3, 5), (3, 1), (1, 3), (1, 1)

    Returns:
        cost -- mean logistic cost over the m examples
        cache -- every intermediate needed by backward_propagation_n
    """
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]
    # Layer 1: linear + ReLU
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    # Layer 2: linear + ReLU
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    # Output layer: linear + sigmoid
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)
    # Cross-entropy cost averaged over the batch.
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1./m * np.sum(logprobs)
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
# Now, run backward propagation.
# In[33]:
def backward_propagation_n(X, Y, cache):
    """Backward pass of the 3-layer network in Figure 2.

    Arguments:
        X -- input datapoint, of shape (input size, 1)
        Y -- true "label"
        cache -- intermediates produced by forward_propagation_n()

    Returns:
        gradients -- dict mapping each parameter / activation /
        pre-activation name to the gradient of the cost with respect to it
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    scale = 1./m
    # Output layer: sigmoid + cross-entropy simplifies to A3 - Y.
    dZ3 = A3 - Y
    dW3 = scale * np.dot(dZ3, A2.T)
    db3 = scale * np.sum(dZ3, axis=1, keepdims=True)
    # Hidden layer 2 (ReLU derivative is the indicator A2 > 0).
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * np.int64(A2 > 0)
    dW2 = scale * np.dot(dZ2, A1.T)
    db2 = scale * np.sum(dZ2, axis=1, keepdims=True)
    # Hidden layer 1.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * np.int64(A1 > 0)
    dW1 = scale * np.dot(dZ1, X.T)
    db1 = scale * np.sum(dZ1, axis=1, keepdims=True)
    return {"dZ3": dZ3, "dW3": dW3, "db3": db3,
            "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
# You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
# **How does gradient checking work?**.
#
# As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:
#
# $$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$
#
# However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.
#
# The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary.
#
# <img src="images/dictionary_to_vector.png" style="width:600px;height:400px;">
# <caption><center> <u> **Figure 2** </u>: **dictionary_to_vector() and vector_to_dictionary()**<br> You will need these functions in gradient_check_n()</center></caption>
#
# We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.
#
# **Exercise**: Implement gradient_check_n().
#
# **Instructions**: Here is pseudo-code that will help you implement the gradient check.
#
# For each i in num_parameters:
# - To compute `J_plus[i]`:
# 1. Set $\theta^{+}$ to `np.copy(parameters_values)`
# 2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$
# 3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`.
# - To compute `J_minus[i]`: do the same thing with $\theta^{-}$
# - Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$
#
# Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute:
# $$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
# In[34]:
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """Verify backward_propagation_n against a numerical gradient of
    forward_propagation_n.

    Arguments:
        parameters -- dict with keys "W1", "b1", "W2", "b2", "W3", "b3"
        gradients -- output of backward_propagation_n, gradients of the
            cost with respect to the parameters
        X -- input datapoint, of shape (input size, 1)
        Y -- true "label"
        epsilon -- step size for the two-sided difference quotient

    Returns:
        difference -- relative distance (formula (2)) between the
        backpropagated gradient vector and its numerical approximation
    """
    # Flatten the parameter dict / gradient dict into aligned vectors.
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    gradapprox = np.zeros((num_parameters, 1))
    # Perturb each scalar parameter in turn and measure the cost change.
    for i in range(num_parameters):
        bumped_up = np.copy(parameters_values)
        bumped_up[i][0] += epsilon
        cost_plus, _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_up))
        bumped_down = np.copy(parameters_values)
        bumped_down[i][0] -= epsilon
        cost_minus, _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_down))
        gradapprox[i] = (cost_plus - cost_minus) / (2 * epsilon)
    # Relative difference ||grad - approx|| / (||grad|| + ||approx||).
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if difference > 2e-7:
        print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
    return difference
# In[35]:
# End-to-end check of the n-dimensional model against its fixture case.
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
# **Expected output**:
#
# <table>
# <tr>
# <td> ** There is a mistake in the backward propagation!** </td>
# <td> difference = 0.285093156781 </td>
# </tr>
# </table>
# It seems that there were errors in the `backward_propagation_n` code we gave you! Good that you've implemented the gradient check. Go back to `backward_propagation` and try to find/correct the errors *(Hint: check dW2 and db1)*. Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell defining `backward_propagation_n()` if you modify the code.
#
# Can you get gradient check to declare your derivative computation correct? Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and re-run gradient check until you're convinced backprop is now correctly implemented.
#
# **Note**
# - Gradient Checking is slow! Approximating the gradient with $\frac{\partial J}{\partial \theta} \approx \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon}$ is computationally costly. For this reason, we don't run gradient checking at every iteration during training. Just a few times to check if the gradient is correct.
# - Gradient Checking, at least as we've presented it, doesn't work with dropout. You would usually run the gradient check algorithm without dropout to make sure your backprop is correct, then add dropout.
#
# Congrats, you can be confident that your deep learning model for fraud detection is working correctly! You can even use this to convince your CEO. :)
#
# <font color='blue'>
# **What you should remember from this notebook**:
# - Gradient checking verifies closeness between the gradients from backpropagation and the numerical approximation of the gradient (computed using forward propagation).
# - Gradient checking is slow, so we don't run it in every iteration of training. You would usually run it only to make sure your code is correct, then turn it off and use backprop for the actual learning process.
# In[ ]:
| 44.216518 | 392 | 0.643849 |
b2a53a3bcaf994ebd51d05e539a674394f2256da | 14,309 | py | Python | .venv/lib/python3.8/site-packages/poetry/core/packages/dependency.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/poetry/core/packages/dependency.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/poetry/core/packages/dependency.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
from typing import Any
from typing import FrozenSet
from typing import List
from typing import Optional
from typing import Union
import poetry.core.packages
from poetry.core.semver import Version
from poetry.core.semver import VersionConstraint
from poetry.core.semver import VersionRange
from poetry.core.semver import VersionUnion
from poetry.core.semver import parse_constraint
from poetry.core.version.markers import AnyMarker
from poetry.core.version.markers import parse_marker
from .constraints import parse_constraint as parse_generic_constraint
from .constraints.constraint import Constraint
from .constraints.multi_constraint import MultiConstraint
from .constraints.union_constraint import UnionConstraint
from .specification import PackageSpecification
from .utils.utils import convert_markers
if TYPE_CHECKING:
from poetry.core.version.markers import BaseMarker # noqa
from poetry.core.version.markers import VersionTypes # noqa
from .constraints import BaseConstraint # noqa
class Dependency(PackageSpecification):
def __init__(
    self,
    name,  # type: str
    constraint,  # type: Union[str, VersionConstraint]
    optional=False,  # type: bool
    category="main",  # type: str
    allows_prereleases=False,  # type: bool
    extras=None,  # type: Union[List[str], FrozenSet[str]]
    source_type=None,  # type: Optional[str]
    source_url=None,  # type: Optional[str]
    source_reference=None,  # type: Optional[str]
    source_resolved_reference=None,  # type: Optional[str]
):
    """Create a dependency on *name* matching *constraint*.

    The source_* arguments and *extras* are forwarded to
    PackageSpecification; everything else configures this dependency.
    """
    super(Dependency, self).__init__(
        name,
        source_type=source_type,
        source_url=source_url,
        source_reference=source_reference,
        source_resolved_reference=source_resolved_reference,
        features=extras,
    )
    self._constraint = None
    self.set_constraint(constraint=constraint)
    # Keep the caller's original spelling for display purposes.
    self._pretty_constraint = str(constraint)
    self._optional = optional
    self._category = category
    # A lower bound that is itself a pre-release implies the dependency
    # accepts pre-releases even if the caller did not say so explicitly.
    if isinstance(self._constraint, VersionRange) and self._constraint.min:
        allows_prereleases = (
            allows_prereleases or self._constraint.min.is_prerelease()
        )
    self._allows_prereleases = allows_prereleases
    self._python_versions = "*"
    self._python_constraint = parse_constraint("*")
    # "Transitive" variants are populated externally during resolution and
    # fall back to the direct values while still None.
    self._transitive_python_versions = None
    self._transitive_python_constraint = None
    self._transitive_marker = None
    self._extras = frozenset(extras or [])
    # NOTE(review): populated externally; appears to track which extras
    # include this dependency — confirm with callers.
    self._in_extras = []
    # Optional dependencies start deactivated.
    self._activated = not self._optional
    self.is_root = False
    self.marker = AnyMarker()
    self.source_name = None
@property
def name(self): # type: () -> str
return self._name
@property
def constraint(self): # type: () -> "VersionTypes"
return self._constraint
def set_constraint(self, constraint): # type: (Union[str, "VersionTypes"]) -> None
try:
if not isinstance(constraint, VersionConstraint):
self._constraint = parse_constraint(constraint)
else:
self._constraint = constraint
except ValueError:
self._constraint = parse_constraint("*")
@property
def pretty_constraint(self): # type: () -> str
    """Human-readable form of the constraint, as originally supplied."""
    return self._pretty_constraint
@property
def pretty_name(self): # type: () -> str
    """Package name exactly as the user wrote it (case preserved)."""
    return self._pretty_name
@property
def category(self): # type: () -> str
    """Dependency group this belongs to (e.g. "main" or "dev")."""
    return self._category
@property
def python_versions(self): # type: () -> str
    """Python version specifier string this dependency applies to."""
    return self._python_versions
@python_versions.setter
def python_versions(self, value): # type: (str) -> None
    # Keep the raw string, the parsed constraint, and the environment
    # marker in sync.  A non-"any" constraint is folded into the marker
    # as a python_version / python_full_version clause.
    self._python_versions = value
    self._python_constraint = parse_constraint(value)
    if not self._python_constraint.is_any():
        self.marker = self.marker.intersect(
            parse_marker(
                self._create_nested_marker(
                    "python_version", self._python_constraint
                )
            )
        )
@property
def transitive_python_versions(self): # type: () -> str
    """Python versions after transitive resolution; falls back to own value."""
    if self._transitive_python_versions is None:
        return self._python_versions
    return self._transitive_python_versions
@transitive_python_versions.setter
def transitive_python_versions(self, value): # type: (str) -> None
    # Store both the raw string and its parsed constraint.
    self._transitive_python_versions = value
    self._transitive_python_constraint = parse_constraint(value)
@property
def transitive_marker(self): # type: () -> "BaseMarker"
    """Marker after transitive resolution; falls back to the own marker."""
    if self._transitive_marker is None:
        return self.marker
    return self._transitive_marker
@transitive_marker.setter
def transitive_marker(self, value): # type: ("BaseMarker") -> None
    self._transitive_marker = value
@property
def python_constraint(self): # type: () -> "VersionTypes"
    """Parsed form of :attr:`python_versions`."""
    return self._python_constraint
@property
def transitive_python_constraint(self): # type: () -> "VersionTypes"
    """Parsed transitive Python constraint; falls back to own constraint."""
    if self._transitive_python_constraint is None:
        return self._python_constraint
    return self._transitive_python_constraint
@property
def extras(self): # type: () -> FrozenSet[str]
    """Extras requested from the depended-on package."""
    return self._extras
@property
def in_extras(self): # type: () -> list
    """Names of the extras (of the parent package) this dependency belongs to."""
    return self._in_extras
@property
def base_pep_508_name(self): # type: () -> str
    """PEP 508 requirement string without environment markers.

    Built as ``name[extras] (specifier)``; the specifier form depends on
    the constraint type handled below.
    """
    requirement = self.pretty_name
    if self.extras:
        requirement += "[{}]".format(",".join(self.extras))
    if isinstance(self.constraint, VersionUnion):
        if self.constraint.excludes_single_version():
            # e.g. "!=1.2.3" — the union's own string form is already right.
            requirement += " ({})".format(str(self.constraint))
        else:
            # Re-parse each comma-separated piece of the pretty constraint
            # so that it is normalized before being rejoined.
            constraints = self.pretty_constraint.split(",")
            constraints = [parse_constraint(c) for c in constraints]
            constraints = [str(c) for c in constraints]
            requirement += " ({})".format(",".join(constraints))
    elif isinstance(self.constraint, Version):
        # Exact version pin.
        requirement += " (=={})".format(self.constraint.text)
    elif not self.constraint.is_any():
        # Generic range: strip spaces for compact PEP 508 output.
        requirement += " ({})".format(str(self.constraint).replace(" ", ""))
    return requirement
def allows_prereleases(self): # type: () -> bool
    """Whether pre-release versions may satisfy this dependency."""
    return self._allows_prereleases
def is_optional(self): # type: () -> bool
    """Whether this dependency is optional."""
    return self._optional
def is_activated(self): # type: () -> bool
    """Whether this dependency is currently active."""
    return self._activated
def is_vcs(self): # type: () -> bool
    """Always False for a plain version dependency."""
    return False
def is_file(self): # type: () -> bool
    """Always False for a plain version dependency."""
    return False
def is_directory(self): # type: () -> bool
    """Always False for a plain version dependency."""
    return False
def is_url(self): # type: () -> bool
    """Always False for a plain version dependency."""
    return False
def accepts(self, package): # type: (poetry.core.packages.Package) -> bool
    """
    Determines if the given package matches this dependency.
    """
    if self._name != package.name:
        return False
    if not self._constraint.allows(package.version):
        return False
    # Pre-release candidates only match when pre-releases are allowed.
    return not package.is_prerelease() or self.allows_prereleases()
def to_pep_508(self, with_extras=True): # type: (bool) -> str
    """Full PEP 508 requirement string, including environment markers.

    When ``with_extras`` is False, "extra" clauses are stripped from the
    marker before rendering.
    """
    requirement = self.base_pep_508_name
    markers = []
    has_extras = False
    if not self.marker.is_any():
        marker = self.marker
        if not with_extras:
            marker = marker.without_extras()
        # we re-check for any marker here since the without extra marker might
        # return an any marker again
        if not marker.is_empty() and not marker.is_any():
            markers.append(str(marker))
        has_extras = "extra" in convert_markers(marker)
    else:
        # Python marker
        if self.python_versions != "*":
            python_constraint = self.python_constraint
            markers.append(
                self._create_nested_marker("python_version", python_constraint)
            )
    # Add an "extra == ..." marker for the parent-package extras this
    # dependency belongs to, unless the marker already carries one.
    in_extras = " || ".join(self._in_extras)
    if in_extras and with_extras and not has_extras:
        markers.append(
            self._create_nested_marker("extra", parse_generic_constraint(in_extras))
        )
    if markers:
        if self.is_vcs() or self.is_url():
            # VCS/URL requirements need a space before the marker separator.
            requirement += " "
        if len(markers) > 1:
            markers = ["({})".format(m) for m in markers]
            requirement += "; {}".format(" and ".join(markers))
        else:
            requirement += "; {}".format(markers[0])
    return requirement
def _create_nested_marker(
    self, name, constraint
): # type: (str, Union["BaseConstraint", Version, VersionConstraint]) -> str
    """Render *constraint* as a PEP 508 marker expression on *name*.

    Handles compound (multi/union) constraints recursively, single
    generic constraints, version unions, exact versions, and version
    ranges.  "python_version" is upgraded to "python_full_version"
    whenever the compared version has precision >= 3.
    """
    if isinstance(constraint, (MultiConstraint, UnionConstraint)):
        parts = []
        for c in constraint.constraints:
            multi = False
            if isinstance(c, (MultiConstraint, UnionConstraint)):
                # Nested compound constraints are parenthesized when this
                # is a union, to preserve precedence.
                multi = True
            parts.append((multi, self._create_nested_marker(name, c)))
        glue = " and "
        if isinstance(constraint, UnionConstraint):
            parts = [
                "({})".format(part[1]) if part[0] else part[1] for part in parts
            ]
            glue = " or "
        else:
            parts = [part[1] for part in parts]
        marker = glue.join(parts)
    elif isinstance(constraint, Constraint):
        marker = '{} {} "{}"'.format(name, constraint.operator, constraint.version)
    elif isinstance(constraint, VersionUnion):
        # Union of ranges: each range rendered recursively, OR-joined.
        parts = []
        for c in constraint.ranges:
            parts.append(self._create_nested_marker(name, c))
        glue = " or "
        parts = ["({})".format(part) for part in parts]
        marker = glue.join(parts)
    elif isinstance(constraint, Version):
        if constraint.precision >= 3 and name == "python_version":
            name = "python_full_version"
        marker = '{} == "{}"'.format(name, constraint.text)
    else:
        # Version range: emit lower and/or upper bound clauses.
        if constraint.min is not None:
            min_name = name
            if constraint.min.precision >= 3 and name == "python_version":
                min_name = "python_full_version"
            if constraint.max is None:
                name = min_name
            op = ">="
            if not constraint.include_min:
                op = ">"
            version = constraint.min.text
            if constraint.max is not None:
                max_name = name
                if constraint.max.precision >= 3 and name == "python_version":
                    max_name = "python_full_version"
                text = '{} {} "{}"'.format(min_name, op, version)
                op = "<="
                if not constraint.include_max:
                    op = "<"
                # NOTE(review): the max bound formats the Version object
                # itself (via str()) while the min bound uses .text —
                # presumably equivalent; confirm Version.__str__ == .text.
                version = constraint.max
                text += ' and {} {} "{}"'.format(max_name, op, version)
                return text
        elif constraint.max is not None:
            # Upper bound only.
            if constraint.max.precision >= 3 and name == "python_version":
                name = "python_full_version"
            op = "<="
            if not constraint.include_max:
                op = "<"
            version = constraint.max
        else:
            # Unbounded range ("*"): no marker needed.
            return ""
        marker = '{} {} "{}"'.format(name, op, version)
    return marker
def activate(self): # type: () -> None
    """
    Set the dependency as mandatory.
    """
    self._activated = True
def deactivate(self): # type: () -> None
    """
    Set the dependency as optional.
    """
    # Deactivating also marks the dependency optional; the guard is
    # redundant (it assigns True either way) but kept as-is.
    if not self._optional:
        self._optional = True
    self._activated = False
def with_constraint(
    self, constraint
): # type: (Union[str, VersionConstraint]) -> Dependency
    """Return a copy of this dependency carrying a different constraint.

    All other state (flags, markers, python versions, extras membership)
    is carried over unchanged.
    """
    clone = Dependency(
        self.pretty_name,
        constraint,
        optional=self.is_optional(),
        category=self.category,
        allows_prereleases=self.allows_prereleases(),
        extras=self._extras,
        source_type=self._source_type,
        source_url=self._source_url,
        source_reference=self._source_reference,
    )
    clone.is_root = self.is_root
    clone.python_versions = self.python_versions
    clone.transitive_python_versions = self.transitive_python_versions
    clone.marker = self.marker
    clone.transitive_marker = self.transitive_marker
    # Copy the extras membership list without sharing the underlying list.
    clone.in_extras.extend(self.in_extras)
    return clone
def __eq__(self, other): # type: (Any) -> bool
    """Equal when package identity, constraint and extras all match."""
    if not isinstance(other, Dependency):
        return NotImplemented
    return (
        self.is_same_package_as(other)
        and self._constraint == other.constraint
        and self._extras == other.extras
    )
def __ne__(self, other): # type: (Any) -> bool
    """Inverse of __eq__.

    Propagates NotImplemented (instead of negating it) so that foreign
    types get a chance to answer the reflected comparison — the previous
    ``not self == other`` silently collapsed that case.
    """
    result = self.__eq__(other)
    if result is NotImplemented:
        return result
    return not result
def __hash__(self): # type: () -> int
    """Mix the base-class hash with constraint and extras so that equal
    dependencies hash equally (keeps the __eq__/__hash__ contract)."""
    return (
        super(Dependency, self).__hash__()
        ^ hash(self._constraint)
        ^ hash(self._extras)
    )
def __str__(self): # type: () -> str
    """Root dependencies print as their pretty name; others in PEP 508 form."""
    if self.is_root:
        return self._pretty_name
    return self.base_pep_508_name
def __repr__(self): # type: () -> str
    """Debug representation: ``<ClassName str(self)>``."""
    return "<{} {}>".format(self.__class__.__name__, str(self))
| 33.988124 | 89 | 0.566007 |
f105cb1db086ef424560ec3c0f4d5ca9cc837bbc | 1,018 | py | Python | lib/python2.7/site-packages/braintree/partner_merchant.py | ervinpepic/E-commerce | 2c15255d1730728cf35c166b9f88cffcb99f5323 | [
"MIT"
] | 182 | 2015-01-09T05:26:46.000Z | 2022-03-16T14:10:06.000Z | lib/python2.7/site-packages/braintree/partner_merchant.py | ervinpepic/E-commerce | 2c15255d1730728cf35c166b9f88cffcb99f5323 | [
"MIT"
] | 95 | 2015-02-24T23:29:56.000Z | 2022-03-13T03:27:58.000Z | lib/python2.7/site-packages/braintree/partner_merchant.py | ervinpepic/E-commerce | 2c15255d1730728cf35c166b9f88cffcb99f5323 | [
"MIT"
] | 93 | 2015-02-19T17:59:06.000Z | 2022-03-19T17:01:25.000Z | from braintree.configuration import Configuration
from braintree.resource import Resource
class PartnerMerchant(Resource):
    """Resource holding the credentials returned for a partner merchant."""

    # Attribute keys lifted from the raw gateway response onto the instance.
    _CREDENTIAL_FIELDS = (
        "partner_merchant_id",
        "private_key",
        "public_key",
        "merchant_public_id",
        "client_side_encryption_key",
    )

    def __init__(self, gateway, attributes):
        Resource.__init__(self, gateway, attributes)
        # Pop each known field out of the attribute dict and expose it
        # directly on the instance, in the same order as before.
        for field in self._CREDENTIAL_FIELDS:
            if field in attributes:
                setattr(self, field, attributes.pop(field))

    def __repr__(self):
        detail_list = ["partner_merchant_id", "public_key", "merchant_public_id", "client_side_encryption_key"]
        return super(PartnerMerchant, self).__repr__(detail_list)
| 46.272727 | 111 | 0.719057 |
96e8ffd445d91f37345680ae1f18deba3a8ff465 | 87 | py | Python | shipengine/http_client/__init__.py | artem1205/shipengine-python | b23a7adee19162220f106346d4d81c755aafea27 | [
"Apache-2.0"
] | null | null | null | shipengine/http_client/__init__.py | artem1205/shipengine-python | b23a7adee19162220f106346d4d81c755aafea27 | [
"Apache-2.0"
] | null | null | null | shipengine/http_client/__init__.py | artem1205/shipengine-python | b23a7adee19162220f106346d4d81c755aafea27 | [
"Apache-2.0"
] | null | null | null | """Synchronous HTTP Client for ShipEngine SDK."""
from .client import ShipEngineClient
| 29 | 49 | 0.793103 |
aa9c66361c624ade298d8f05060d2670b89d38f1 | 646 | py | Python | python/nano/src/bigdl/nano/pytorch/optim/__init__.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/pytorch/optim/__init__.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | null | null | null | python/nano/src/bigdl/nano/pytorch/optim/__init__.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.nano.pytorch.optim.sparseadam import SparseAdam
| 35.888889 | 74 | 0.766254 |
ccd3300ba81b67ba0e0621215ac58c9f517d3db1 | 1,154 | py | Python | test/cloud_simulation_test.py | shihao-zhang/buildsimhub_python_api | daa0b7d2e92820b6b1cdaa981fb9f0d88c375012 | [
"MIT"
] | 19 | 2018-02-27T22:58:04.000Z | 2022-02-21T15:03:59.000Z | test/cloud_simulation_test.py | shihao-zhang/buildsimhub_python_api | daa0b7d2e92820b6b1cdaa981fb9f0d88c375012 | [
"MIT"
] | 11 | 2018-02-15T16:47:53.000Z | 2018-12-19T18:33:20.000Z | test/cloud_simulation_test.py | shihao-zhang/buildsimhub_python_api | daa0b7d2e92820b6b1cdaa981fb9f0d88c375012 | [
"MIT"
] | 11 | 2018-01-26T02:12:38.000Z | 2019-09-29T12:05:31.000Z | """
This sample file demonstrate three ways of running a single model simulation
"""
import BuildSimHubAPI as bsh_api
import BuildSimHubAPI.postprocess as pp
# --- configuration --------------------------------------------------------
# project_key can be found in every project (click the information icon next to project name)
project_key = "f98aadb3-254f-428d-a321-82a6e4b9424c"
file_dir = "/Users/weilixu/Desktop/data/jsontest/5ZoneAirCooled_UniformLoading.epJSON"
# NOTE(review): wea_dir is defined but never used below — presumably meant
# for a weather-file upload variant; confirm before removing.
wea_dir = "/Users/weilixu/Desktop/data/jsontest/in.epw"
# initialize the client
bsh = bsh_api.BuildSimHubAPIClient()
"""
The most straightforward way to do simulation
"""
# One-step variant: run a simulation directly from the local model file.
# track=True blocks until the cloud run finishes.
new_sj_run = bsh.new_simulation_job(project_key)
results = new_sj_run.run(file_dir, track=True)
if results:
    print(str(results.net_site_eui()) + " " + results.last_parameter_unit)
"""
Upload your model with a specific model_key and run simulation
"""
# Two-step variant: upload the model first, then simulate it.
new_sj = bsh.new_simulation_job(project_key)
response = new_sj.create_model(file_dir)
results = new_sj.run_model_simulation(track=True)
if results:
    print(str(results.not_met_hour_cooling()) + " " + results.last_parameter_unit)
# Post-process zone loads from the last simulation.
# NOTE(review): runs unconditionally — if the simulation above failed,
# results may be falsy and zone_load() would fail; confirm intended.
load_data = results.zone_load()
load = pp.ZoneLoad(load_data)
print(load.pandas_df())
| 30.368421 | 93 | 0.77643 |
9652fe2bff47d4497a7a11b2370a4629af04d3c5 | 604 | py | Python | m4/numpyinclude.py | ndl303/umkehr | a9a499931a2b99bb9eda6f93dae3b3807ef1746b | [
"Unlicense"
] | 1 | 2020-06-29T19:56:58.000Z | 2020-06-29T19:56:58.000Z | m4/numpyinclude.py | woudc/woudc-umkehr | a9a499931a2b99bb9eda6f93dae3b3807ef1746b | [
"Unlicense"
] | null | null | null | m4/numpyinclude.py | woudc/woudc-umkehr | a9a499931a2b99bb9eda6f93dae3b3807ef1746b | [
"Unlicense"
] | null | null | null | import site
import os
names = site.getusersitepackages()
found = False
for name in names:
if (not found):
fullname = name + os.sep +'numpy'+os.sep+'core'+os.sep+'include'
found = os.path.isdir(fullname)
if (found):
print(fullname)
if (not found):
names = site.getsitepackages()
for name in names:
if (not found):
fullname = name + os.sep+'numpy'+os.sep+'core'+os.sep+'include'
found = os.path.isdir(fullname)
if (found):
print(fullname)
if (not found):
print('NOTFOUND')
| 24.16 | 73 | 0.554636 |
6604529143a3f04aa81dc2b0ae64257985d2d7bc | 8,072 | py | Python | packages/create_python.py | emsig/libdlf | bbe626d7bf4b13afb8878eafb28ed835c884104e | [
"CC-BY-4.0"
] | 4 | 2021-08-06T14:52:20.000Z | 2021-09-12T09:40:06.000Z | packages/create_python.py | emsig/libdlf | bbe626d7bf4b13afb8878eafb28ed835c884104e | [
"CC-BY-4.0"
] | 12 | 2021-08-07T08:40:26.000Z | 2021-08-22T17:51:16.000Z | packages/create_python.py | emsig/libdlf | bbe626d7bf4b13afb8878eafb28ed835c884104e | [
"CC-BY-4.0"
] | 1 | 2021-08-10T09:20:27.000Z | 2021-08-10T09:20:27.000Z | import os
import json
import shutil
import pathlib
import subprocess
import numpy as np
from os.path import abspath
# Get git version
# `git describe --tags` yields e.g. 'v1.2.0' or 'v1.2.0-3-gabc123'; the
# leading 'v' is stripped and a commit count becomes a '.devN' suffix.
version = subprocess.check_output(
    ['git', 'describe', '--tags'], stderr=subprocess.DEVNULL
).strip().decode('utf-8').split('-')
if len(version) > 1 and version[1]:
    version = version[0][1:] + '.dev' + version[1]
else:
    version = version[0][1:]
# Create package directory
path_libdlf = abspath("python/libdlf")
path_lib = os.path.join(path_libdlf, 'lib')
pathlib.Path(path_libdlf).mkdir(parents=True, exist_ok=True)
# Copy library to python package
shutil.copytree(abspath('../lib'), path_lib, dirs_exist_ok=True)
# Copy README
shutil.copyfile('../README.md', 'python/README.md')
# Copy LICENSEs
shutil.copyfile('../LICENSE', 'python/libdlf/lib/LICENSE')
shutil.copyfile('LICENSE', 'python/LICENSE')
# Create setup.py
# The version computed above is interpolated into the f-string below.
setup = f"""# -*- coding: utf-8 -*-
from setuptools import setup
# Longer description
readme = ('Library for Digital Linear Filters (DLF) as used, for instance, '
          'in Geophysics for electromagnetic modelling. See '
          'https://github.com/emsig/libdlf')
setup(
    name="libdlf",
    version="{version}",
    description="Library for Digital Linear Filters (DLF)",
    long_description=readme,
    author="The emsig community",
    author_email="info@emsig.xyz",
    url="https://github.com/emsig/libdlf",
    license="BSD-3-Clause",
    packages=["libdlf"],
    include_package_data=True,
    install_requires=["numpy"],
)
"""
with open(abspath("python/setup.py"), "w") as fs:
    fs.write(setup)
# Create setup.cfg
with open(abspath("python/setup.cfg"), "w") as fs:
    fs.write("[flake8]\n")
    fs.write("per-file-ignores = __init__.py: F401")
# Create MANIFEST.in
with open(abspath("python/MANIFEST.in"), "w") as fm:
    fm.write("include libdlf/lib/LICENSE\n")
    fm.write("include libdlf/lib/*/*.npz\n")
    fm.write("include LICENSE\n")
    fm.write("exclude MANIFEST.in\n")
    fm.write("exclude setup.cfg\n")
# Read json
# filters.json maps transform names to lists of filter descriptors with
# 'name', 'file' and 'values' keys (consumed by the loops below).
with open(os.path.join(path_lib, 'filters.json')) as fj:
    lib = json.load(fj)
# Create init file
# The package __init__ re-exports one module per transform and the version.
with open(os.path.join(path_libdlf, '__init__.py'), 'w') as fi:
    # Loop over transforms and add them
    for transform, flist in lib.items():
        fi.write(f"from libdlf import {transform}\n")
    fi.write(f"\n__version__ = '{version}'\n")
# Loop over transforms
# Each transform gets a generated module exposing one function per filter;
# the filter data is converted from txt to compressed npz on the way.
for transform, flist in lib.items():
    # Create transform file and loop over filters
    with open(os.path.join(path_libdlf, f"{transform}.py"), "w") as ft:
        # Imports
        ft.write("import os\n")
        ft.write("import numpy as np\n\n\n")
        # Write __all__
        ft.write("__all__ = [\n    '")
        ft.write("',\n    '".join([filt['name'] for filt in flist]))
        ft.write("',\n]\n\n")
        # Path of the library
        ft.write("_LIBPATH = os.path.abspath(os.path.dirname(__file__))\n\n\n")
        # Number of filters for this transform
        nr_filt = len(flist)
        # Loop over filters
        for f_i, filt in enumerate(flist):
            # File path and name of the filter
            fname = f"{filt['file']}"
            # Convert txt to npz
            txtfile = np.loadtxt(os.path.join(path_libdlf, fname), unpack=True)
            np.savez_compressed(
                os.path.join(path_libdlf, fname[:-4]), dlf=txtfile
            )
            # Start the function
            ft.write(f"def {filt['name']}():")
            # Get and write header
            # The txt file's comment header becomes the function docstring.
            with open(abspath(f"python/libdlf/{fname}")) as fl:
                # Add title
                ft.write(f'\n    """{fl.readline()[2: ]}')
                # Loop over remaining lines
                for line in fl:
                    # Do not copy the title-underline; just newline
                    if '========' in line:
                        ft.write('\n')
                    # Empty lines: only remove comment
                    elif line == '#\n':
                        ft.write('\n')
                    # The license is the last thing of the header
                    elif 'file is part of libdlf' in line:
                        # Add returned values
                        ft.write("\n    Returns\n")
                        ft.write("    -------\n")
                        values = filt['values'].replace(',', ', ')
                        ft.write(f"    base, {values} : ndarray\n")
                        ft.write("        Filter base and its values.\n\n")
                        # Finish header
                        ft.write('    """\n')
                        # Stop header loop
                        break
                    # Remove comment (#) and add indentation
                    else:
                        ft.write(f"    {line[2:]}")
            # Write function; we use np.loadtxt to read the files
            # Generated body lazily loads the npz once and caches it as a
            # function attribute.
            ft.write(
                f"    if getattr({filt['name']}, 'cache', None) is None:\n"
                f"        fname = '{fname[:-3]}npz'\n"
                f"        {filt['name']}.cache = np.load(\n"
                "            os.path.join(_LIBPATH, fname))['dlf']\n"
                f"    return {filt['name']}.cache\n")
            # Add values to function
            ft.write(
                f"\n\n{filt['name']}.values = {filt['values'].split(',')}\n")
            # Empty lines after function (except for last filter)
            if f_i < nr_filt-1:
                ft.write("\n\n")
            # Remove txt file.
            os.remove(os.path.join(path_libdlf, fname))
# Remove json file.
os.remove(os.path.join(path_lib, 'filters.json'))
# Write tests
# The test module below is written verbatim into python/tests/; it checks
# every generated filter against a known analytical transform pair.
test_filters = r"""import libdlf
import numpy as np
# Anderson (1975): J0_01/J0_2; sin_1/cos_1; with a=1
xt = np.array([1.0])
def test_hankel():
    # Analytical RHS
    rhs_j0 = np.exp(-xt**2/4)/2
    rhs_j1 = xt/4*np.exp(-xt**2/4)
    # Loop over filters
    for i, filt in enumerate(libdlf.hankel.__all__):
        # Get filter
        tfilt = getattr(libdlf.hankel, filt)
        # Get return values
        values = tfilt.__doc__.split('-------')[1]
        values = values.split('\n')[1].split(':')[0].strip()
        # Load filter
        if len(values.split(',')) == 3:
            base, j0, j1 = tfilt()
        elif 'j0' in values:
            base, j0 = tfilt()
        elif 'j1' in values:
            base, j1 = tfilt()
        else:
            assert 1 == 0
        # Required wavenumbers
        lambd = base/xt[:, None]
        # Compute values
        lhs_j0 = lambd*np.exp(-lambd**2)
        lhs_j1 = lambd**2*np.exp(-lambd**2)
        # Check
        if 'j0' in values:
            assert abs((rhs_j0 - np.dot(lhs_j0, j0)/xt) / rhs_j0) < 1e-4
        if 'j1' in values:
            assert abs((rhs_j1 - np.dot(lhs_j1, j1)/xt) / rhs_j1) < 1e-4
def test_fourier():
    # Analytical RHS
    rhs_sin = np.sqrt(np.pi)*xt*np.exp(-xt**2/4)/4
    rhs_cos = np.sqrt(np.pi)*np.exp(-xt**2/4)/2
    # Loop over filters
    for i, filt in enumerate(libdlf.fourier.__all__):
        # Get filter
        tfilt = getattr(libdlf.fourier, filt)
        # Get return values
        values = tfilt.__doc__.split('-------')[1]
        values = values.split('\n')[1].split(':')[0].strip()
        # Load filter
        if len(values.split(',')) == 3:
            base, sin, cos = tfilt()
        elif 'sin' in values:
            base, sin = tfilt()
        elif 'cos' in values:
            base, cos = tfilt()
        else:
            assert 1 == 0
        # Required frequencies
        freq = base/xt[:, None]
        # Compute values
        lhs_sin = freq*np.exp(-freq**2)
        lhs_cos = np.exp(-freq**2)
        # Check
        if 'sin' in values:
            assert abs((rhs_sin - np.dot(lhs_sin, sin)/xt) / rhs_sin) < 1e-4
        if 'cos' in values:
            assert abs((rhs_cos - np.dot(lhs_cos, cos)/xt) / rhs_cos) < 1e-4
"""
pathlib.Path(abspath("python/tests")).mkdir(exist_ok=True)
with open(abspath("python/tests/test_filters.py"), "w") as fs:
    fs.write(test_filters)
| 29.567766 | 79 | 0.54051 |
ff978cd83ffa847cb62b0f4334ab3b9fbdddf634 | 1,285 | py | Python | pyannote/audio/embedding/approaches/__init__.py | lgalmant/pyannote-audio | d58a9d2e18fb2fddaab99dbc6f93fdbdcfc5f290 | [
"MIT"
] | null | null | null | pyannote/audio/embedding/approaches/__init__.py | lgalmant/pyannote-audio | d58a9d2e18fb2fddaab99dbc6f93fdbdcfc5f290 | [
"MIT"
] | null | null | null | pyannote/audio/embedding/approaches/__init__.py | lgalmant/pyannote-audio | d58a9d2e18fb2fddaab99dbc6f93fdbdcfc5f290 | [
"MIT"
] | 1 | 2020-02-06T16:22:54.000Z | 2020-02-06T16:22:54.000Z | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .classification import Classification
from .triplet_loss import TripletLoss
| 40.15625 | 79 | 0.775875 |
f558e239b85ef4a70ab098732b4018d3120e8c48 | 3,409 | py | Python | toys/basic-example.py | jmtrivial/cartel-generation | 6746f99f43b67b6809fe24f940acfaf0d3728689 | [
"MIT"
] | null | null | null | toys/basic-example.py | jmtrivial/cartel-generation | 6746f99f43b67b6809fe24f940acfaf0d3728689 | [
"MIT"
] | null | null | null | toys/basic-example.py | jmtrivial/cartel-generation | 6746f99f43b67b6809fe24f940acfaf0d3728689 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
# Shared Inkscape/SVG style string for the Avenir Medium typeface; the two
# variants below differ only in text anchoring (start vs end aligned).
avenirStyle = "font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;line-height:125%;font-family:Avenir;-inkscape-font-specification:'Avenir, Medium';text-align:start;writing-mode:lr-tb;"
avenirStyleLeft = avenirStyle+"text-anchor:start;"
avenirStyleRight = avenirStyle+"text-anchor:end;"
def addLargeCartel(root, x, y, txtAuthor, txtTitle, txtDate, txtText, txtMedia, txtCollection):
    """Append a 20cm x 15cm museum-label ("cartel") SVG group to *root*.

    (x, y) position the nested <svg> inside the parent canvas; the text
    arguments fill the author, title/date, description, media and
    collection areas.  NOTE: this is Python 2 code — the .decode('utf-8')
    calls below only work on py2 byte strings.
    """
    # Nested <svg> acting as the cartel's local coordinate system.
    cartel = ET.SubElement(root, "svg", {"height":"15cm", "width":"20cm", "x":x, "y":y})
    # Background image filling the whole cartel.
    image = ET.SubElement(cartel, "image", {"height":"15cm", "width":"20cm", "x":"0cm", "y":"0cm", "xlink:href":"../img/vermont_bg_big.png"})
    # Chained assignment: both the local name and the element's .text get
    # the decoded string (the element reference itself is discarded).
    author = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"2cm"}).text = txtAuthor.decode('utf-8')
    # Flowed text box for title and date (Inkscape flowRoot/flowRegion).
    titleBox = ET.SubElement(cartel, "flowRoot")
    titleBoxRegion = ET.SubElement(titleBox, "flowRegion")
    titleBoxRegionShape = ET.SubElement(titleBoxRegion, "rect", {"width":"17cm", "height":"4cm", "x":"2cm", "y":"2.8cm"})
    title = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:40px" }).text = txtTitle.decode('utf-8')
    date = ET.SubElement(titleBox, "flowPara", { "style":avenirStyleLeft + "font-size:23px" }).text = txtDate.decode('utf-8')
    # Flowed text box for the description paragraph.
    descBox = ET.SubElement(cartel, "flowRoot")
    descBoxRegion = ET.SubElement(descBox, "flowRegion")
    descBoxRegionShape = ET.SubElement(descBoxRegion, "rect", {"width":"17cm", "height":"5.5cm", "x":"2cm", "y":"7.5cm"})
    text = ET.SubElement(descBox, "flowPara", { "style":avenirStyleLeft + "font-size:21px" }).text = txtText.decode('utf-8')
    # Bottom line: media (left) and collection (right-aligned).
    media = ET.SubElement(cartel, "text", {"style":avenirStyleLeft + "font-size:22px", "x":"2cm", "y":"13.5cm"}).text = txtMedia.decode('utf-8')
    collection = ET.SubElement(cartel, "text", {"style":avenirStyleRight + "font-size:22px", "x":"19cm", "y":"13.5cm"}).text = txtCollection.decode('utf-8')
    # Decorative corner mark in the top-left of the cartel.
    cornerTopLeft = ET.SubElement(cartel, "path", {"style":"fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1", "d":"M 20 0 L 0 0 L 0 20"})
# Build a 2x2 grid of identical sample cartels on a 40cm x 30cm SVG canvas
# and write the result to example.svg.
root = ET.Element("svg", {"version":"1.2", "xmlns:xlink":"http://www.w3.org/1999/xlink", "xmlns":"http://www.w3.org/2000/svg", "height":"30cm", "width":"40cm"})
addLargeCartel(root, "0cm", "0cm", "Roget Jourdain (1845-1918)", "Élection du Conseil municipal, tableau récapitulatif des votes", "19ème siècle", "Lorem ipsum dolor sit amet, consectetur adi", "Aquarelle 29,5 x 49,5 cm", "Musée d'histoire locale")
addLargeCartel(root, "20cm", "0cm", "Roget Jourdain (1845-1918)", "Élection du Conseil municipal, tableau récapitulatif des votes", "19ème siècle", "Lorem ipsum dolor sit amet, consectetur adi", "Aquarelle 29,5 x 49,5 cm", "Musée d'histoire locale")
addLargeCartel(root, "0cm", "15cm", "Roget Jourdain (1845-1918)", "Élection du Conseil municipal, tableau récapitulatif des votes", "19ème siècle", "Lorem ipsum dolor sit amet, consectetur adi", "Aquarelle 29,5 x 49,5 cm", "Musée d'histoire locale")
addLargeCartel(root, "20cm", "15cm", "Roget Jourdain (1845-1918)", "Élection du Conseil municipal, tableau récapitulatif des votes", "19ème siècle", "Lorem ipsum dolor sit amet, consectetur adi", "Aquarelle 29,5 x 49,5 cm", "Musée d'histoire locale")
tree = ET.ElementTree(root)
tree.write("example.svg")
| 74.108696 | 253 | 0.699032 |
0d697a986ea1d146621fdcf06a970eb8869ce4df | 1,136 | py | Python | vmware_nsx/dhcp_meta/constants.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/dhcp_meta/constants.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/dhcp_meta/constants.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | 1 | 2019-06-21T18:07:53.000Z | 2019-06-21T18:07:53.000Z | # Copyright 2014 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.db import l3_db
from neutron_lib import constants as const
# A unique MAC to quickly identify the LSN port used for metadata services
# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'.
METADATA_MAC = "fa:15:73:74:d4:74"
# Port attribute keys used to tag the metadata port.
METADATA_PORT_ID = 'metadata:id'
METADATA_PORT_NAME = 'metadata:name'
METADATA_DEVICE_ID = 'metadata:device'
# Neutron device-owner values grouped as "special": DHCP ports and the
# router gateway/interface ports.
SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP,
                  const.DEVICE_OWNER_ROUTER_GW,
                  l3_db.DEVICE_OWNER_ROUTER_INTF)
| 39.172414 | 78 | 0.734155 |
629c2de4441a3df8906b4c14042bc19bb1833d8e | 3,413 | py | Python | wsl/networks/medinet/engine.py | nishanthta/wsl | 5fda3b909a314b7f88ffa9ab27a6a142de6b0159 | [
"MIT"
] | 1 | 2020-11-18T23:46:09.000Z | 2020-11-18T23:46:09.000Z | wsl/networks/medinet/engine.py | nishanthta/wsl | 5fda3b909a314b7f88ffa9ab27a6a142de6b0159 | [
"MIT"
] | null | null | null | wsl/networks/medinet/engine.py | nishanthta/wsl | 5fda3b909a314b7f88ffa9ab27a6a142de6b0159 | [
"MIT"
] | 5 | 2020-09-11T15:43:42.000Z | 2021-03-29T16:42:33.000Z | #!/usr/bin/python3
import time
from typing import Dict, Any
import numpy as np
from wsl.networks.medinet.utils import regression_accuracy
from sklearn.metrics import r2_score
from scipy.stats import spearmanr
from monai.metrics import compute_roc_auc, compute_confusion_metric
import torch
def engine(loader: Any, checkpoint: Dict[str, Any], batchsize: int,
           classes: int, variable_type: str, error_range: int, is_train: bool):
    """Run one epoch of training or evaluation on the GPU.

    ``loader`` yields batches indexed as data[0]=name (unused), data[1]=images,
    data[2]=labels — presumably; confirm against the dataset class.
    ``checkpoint`` must provide 'model', 'criterion', 'optimizer' and 'epoch'.
    ``variable_type`` selects the metric family: 'continous' (sic — the
    misspelling is load-bearing, callers must match it) uses regression
    metrics; anything else is scored as classification via sigmoid outputs.

    Returns (mean_loss, metric, summary_string) where metric is R^2 for
    regression and ROC-AUC for classification.
    """
    overall_loss = []
    # Accumulators over the whole epoch (new batches are prepended).
    all_preds = torch.zeros((0, classes))
    all_labels = torch.zeros((0, classes))
    # Reusable one-hot buffer; re-allocated only when the batch size differs
    # (e.g. the final, smaller batch).
    labels_onehot = torch.FloatTensor(batchsize, classes).cuda()
    start = time.time()
    sigmoid = torch.nn.Sigmoid()
    # Gradients are tracked only while training.
    with torch.set_grad_enabled(is_train):
        for iter_num, data in enumerate(loader):
            # name = data[0]
            imgs = data[1].cuda().float()
            labels = data[2].cuda()
            predicted = checkpoint['model'](imgs)
            loss = checkpoint['criterion'](predicted, labels)
            predicted, labels = predicted.detach(), labels.detach()
            if is_train:
                loss.backward()
                checkpoint['optimizer'].step()
                checkpoint['optimizer'].zero_grad()
            overall_loss.append(float(loss.item()))
            all_preds = torch.cat((predicted, all_preds))
            if variable_type == 'categorical':
                if labels_onehot.shape[0] != labels.shape[0]:
                    labels_onehot = torch.FloatTensor(labels.shape[0], classes).cuda()
                labels_onehot.zero_()
                # Scatter integer class indices into one-hot rows.
                labels_onehot.scatter_(1, labels.unsqueeze(dim=1), 1)
                all_labels = torch.cat((labels_onehot.float(), all_labels))
                predicted = predicted.max(dim=1)[1]  # for correct printing
            else:
                all_labels = torch.cat((labels, all_labels))
            # Throughput estimate; 0 on the very first iteration (iter_num=0).
            speed = batchsize * iter_num // (time.time() - start)
            print('Epoch:', checkpoint['epoch'], 'Iter:', iter_num,
                  'Pred:', round(predicted.float().mean().item(), 3),
                  'Label:', round(labels.float().mean().item(), 3),
                  'Loss:', round(np.mean(overall_loss), 3),
                  'Speed:', int(speed), 'img/s', end='\r', flush=True)
    loss = np.mean(overall_loss)
    if variable_type == 'continous':
        # Regression metrics: R^2, accuracy within error_range, Spearman rho.
        all_labels, all_preds = all_labels.cpu(), all_preds.cpu()
        rmetric = r2_score(all_labels, all_preds)
        acc = regression_accuracy(all_labels, all_preds, error_range)
        spear, pvalue = spearmanr(all_preds, all_labels)
        summary = (f'Epoch Summary - Loss:{round(loss, 3)} Spearman:{round(spear, 2)} PValue:{round(pvalue, 3)} ' +
                   f'R2:{round(rmetric, 1)} Accuracy(at {error_range}):{round(100 * acc, 1)}')
    else:
        # Classification metrics: ROC-AUC plus sensitivity/specificity,
        # computed on sigmoid-activated predictions.
        rmetric = compute_roc_auc(all_preds, all_labels, other_act=sigmoid)
        sens = compute_confusion_metric(all_preds, all_labels,
                                        activation=sigmoid, metric_name='sensitivity')
        spec = compute_confusion_metric(all_preds, all_labels,
                                        activation=sigmoid, metric_name='specificity')
        summary = (f'Epoch Summary- Loss:{round(loss, 3)} ROC:{round(rmetric * 100, 1)} ' +
                   f'Sensitivity:{round(100 * sens, 1)} Specificity: {round(100 * spec, 1)}')
    print(summary)
    return loss, rmetric, summary
| 44.324675 | 115 | 0.600352 |
ba96767e6eb734e1aa61e83cb1ef25bc5206d044 | 31,785 | py | Python | havsim/simulation/update_lane_routes.py | ronan-keane/hav-sim | 0aaf9674e987822ff2dc90c74613d5e68e8ef0ce | [
"Apache-2.0"
] | null | null | null | havsim/simulation/update_lane_routes.py | ronan-keane/hav-sim | 0aaf9674e987822ff2dc90c74613d5e68e8ef0ce | [
"Apache-2.0"
] | null | null | null | havsim/simulation/update_lane_routes.py | ronan-keane/hav-sim | 0aaf9674e987822ff2dc90c74613d5e68e8ef0ce | [
"Apache-2.0"
] | 2 | 2020-09-30T22:44:37.000Z | 2021-05-09T07:36:28.000Z |
"""
Functions for setting/updating lane events, route events, and code for changing lanes.
"""
from havsim.simulation import vehicle_orders
def update_veh_after_lc(lc_actions, veh, timeind):
    """When a vehicle changes lanes, this function does all the necessary updates.
    When a vehicle changes lanes, we need to update its lane, road, llane/rlane, r/l_lc, lanemem,
    and the lane changing model internal state.
    More importantly, we need to update all the leader/follower relationships.
    ***Naming conventions***
    Every vehicle has its leader (lead) and follower (fol). Putting l or r in front of lead/fol indicates
    that it is the left/right leader/follower. Consider some vehicle, the 'ego vehicle'. The ego vehicle's
    lfol is the vehicle in the left lane closest to the ego vehicle, without going past the position of the
    ego vehicle. llead has two possible meanings. The llead attribute is the set of all vehicles which
    have the ego vehicle as a rfol. In the context of a lane changing model, we use llead to refer to the
    leader of lfol. Note that the leader of lfol is not even necessarily in the set which defines
    the llead attribute.
    The same definitions apply to rfol and rlead as to lfol and llead.
    The other naming conventions are lcside, newlcside, and opside. If a vehicle changes to the left,
    lcside (lane change side) refers to the left lane, the opside (opposite lane change side) refers to
    the right lane. The newlcside (new lane change side) is the new lcside after changing lanes, so if the
    side is left, it refers to two lanes to the left.
    Note in this case we are using 'new' to refer to the situation after the lane change. This is another
    convention used for lane changing models.
    Args:
        lc_actions: dictionary with keys as vehicles which request lane changes in the current timestep,
            values are a string either 'l' or 'r' which indicates the side of the change
        veh: Vehicle object which changes lanes, and has a key/value in lc_actions
        timeind: int giving the timestep of the simulation (0 indexed)
    Returns:
        None. Modifies veh, and all vehicles which have a relationship with veh, in place.
    """
    # TODO no check for vehicles moving into same gap (store the lcside fol/lead in lc_actions,
    # check if they are the same?)
    # also, this is related to the error in the edge case where a vehicle has None leader, then has 2 vehicles
    # change in front of it, the leadmem gets 2 things added, and then the relaxation throws error because
    # olds = None but it expects olds is not None.
    lc = lc_actions[veh]
    # step 1: updates to lanes, road, pos, lanemem, and the l/r lane and lc-state attributes.
    # note the change is recorded at timeind+1 (the timestep at which the new lane applies).
    lcsidelane, newlcsidelane, lc = update_lane_after_lc(veh, lc, timeind+1)
    # step 2: reset the lane changing model's internal state for the completed change
    veh.update_lc_state(timeind, lc=lc)
    # step 3: updates to vehicle orders (all lead/fol relationships affected by the change)
    vehicle_orders.update_leadfol_after_lc(veh, lcsidelane, newlcsidelane, lc, timeind)
    return
def update_lane_after_lc(veh, lc, timeind):
    """After a lane change, this updates the lane, road, pos, lanemem, r/llane, l/r_lc attributes.

    The left and right cases are exact mirror images of each other, so a single code path is
    parameterized by the attribute names for the lane change side and opposite side (the original
    implementation duplicated the entire logic in two branches).

    Args:
        veh: Vehicle object to update.
        lc: side of lane change; either 'l' or 'r'
        timeind: time index of change (+1 higher than the current time index in simulation)
    Returns:
        lcsidelane: the Lane veh changed onto (its new current lane)
        newlcsidelane: the new lane on the lane change side after the change (possibly None)
        lc: the side of the change, unchanged ('l' or 'r')
    """
    if lc == 'l':
        opside_lane_attr, opside_lc_attr = 'rlane', 'r_lc'
        lcside_lane_attr, lcside_lc_attr = 'llane', 'l_lc'
        connect_name = 'get_connect_left'
    else:
        opside_lane_attr, opside_lc_attr = 'llane', 'l_lc'
        lcside_lane_attr, lcside_lc_attr = 'rlane', 'r_lc'
        connect_name = 'get_connect_right'
    # the old current lane becomes the lane on the opposite side of the change
    setattr(veh, opside_lane_attr, veh.lane)
    lcsidelane = getattr(veh, lcside_lane_attr)
    newroadname = lcsidelane.roadname
    if newroadname != veh.road:
        # changed onto a different road: convert position to the new road's coordinates.
        # changing back is then impossible, so the opposite side lc state becomes None.
        veh.pos -= veh.lane.roadlen[newroadname]
        veh.road = newroadname
        setattr(veh, opside_lc_attr, None)
    else:
        setattr(veh, opside_lc_attr, 'discretionary')
    veh.lane = lcsidelane
    veh.lanemem.append((lcsidelane, timeind))
    # find the new lane on the lane change side and default its lc state
    # (discretionary only when it belongs to the same road)
    newlcsidelane = getattr(lcsidelane, connect_name)(veh.pos)
    setattr(veh, lcside_lane_attr, newlcsidelane)
    if newlcsidelane is not None and newlcsidelane.roadname == veh.road:
        setattr(veh, lcside_lc_attr, 'discretionary')
    else:
        setattr(veh, lcside_lc_attr, None)
    return lcsidelane, newlcsidelane, lc
def update_lane_events(veh, timeind, remove_vehicles):
    """Check if the next event from a Vehicle's lane_events should be applied, and apply it if so.
    lane_events are a list of events which handle anything related to the network topology,
    i.e. when the current lane ends, or when the current lane's left or right connections change.
    Each event is a dictionary with the keys of
    'pos': the float position the event occurs (relative to the vehicle's current lane)
    'event': one of
        'new lane' - occurs when a vehicle reaches the end of its current lane and transitions to a new lane
        'update lr' - occurs when the current lane's left or right connections change
        'exit' - occurs when a vehicle reaches the end of its current lane and exits the road network
    'left': for 'new lane' or 'update lr', if the left connection changes, 'left' has a value of either
        'add' - Used when the current left lane changes from None to a new lane, or when the current left
            lane changes tracks.
        'remove' - if the current left connection is no longer possible
        'update' - if there is still a left lane in the same track, but now it refers to a new lane object
    'left anchor': if 'left' is 'add', 'left anchor' is an index giving the merge anchor for the
        new left lane
    'right': same as left, for right side
    'right anchor': same as left anchor, for right side
    Args:
        veh: Vehicle object to update
        timeind: int giving the timestep of the simulation (0 indexed)
        remove_vehicles: list of vehicles which will be removed from simulation at the current
            timestep; exiting vehicles are appended to it in place
    Returns:
        None. (Modifies Vehicle attributes in place, appends to remove_vehicles in place.)
    """
    # TODO maybe combine lane events/route events into a single priority queue and keep the next event
    # position in memory? maybe not worth to do.
    # If this optimization is made, when set_lane_events and set_route_events is called, you would have to
    # sort all the events and make sure two events with the same positions get sorted such that the route
    # event occurs after (e.g. add a small constant to route event positions).
    if not veh.lane_events:
        return
    curevent = veh.lane_events[0]
    if veh.pos > curevent['pos']:  # could keep curevent['pos'] in vehicle memory so we don't have to access
        # it every timestep
        if curevent['event'] == 'new lane':
            # update lane/road/position
            newlane = veh.lane.connect_to
            update_new_lane(veh, veh.lane, newlane, timeind+1)
            # updates left and right connections
            update_lane_lr(veh, newlane, curevent)
            veh.update_lc_state(timeind)
            # enter new road/lane -> need new lane/route events
            # (set_lane_events replaces the whole list, so no pop is needed here)
            set_lane_events(veh)
            set_route_events(veh, timeind)
        elif curevent['event'] == 'update lr':
            update_lane_lr(veh, veh.lane, curevent)
            veh.update_lc_state(timeind)
            veh.lane_events.pop(0)  # event is over, so we shouldn't check it in the future
        elif curevent['event'] == 'exit':
            fol = veh.fol
            # transfer veh's left/right leaders to its follower, so their l/rfol stays valid
            # need to check l/rlead for edge case when you overtake and exit in same timestep?
            for i in veh.llead:
                i.rfol = fol
                fol.llead.append(i)
            for i in veh.rlead:
                i.lfol = fol
                fol.rlead.append(i)
            # update vehicle orders: the follower now has no leader
            fol.lead = None
            fol.leadmem.append((None, timeind+1))
            # remove veh from its side followers' lead sets
            if veh.lfol is not None:
                veh.lfol.rlead.remove(veh)
            if veh.rfol is not None:
                veh.rfol.llead.remove(veh)
            # to remove the vehicle, set its end time and put it in remove_vehicles
            veh.end = timeind+1
            remove_vehicles.append(veh)
    return
def update_lane_lr(veh, curlane, curevent):
    """Updates a vehicle's attributes when its lane changes its left/right connections.
    For a Vehicle veh which reaches a point where its curlane.get_connect_left or get_connect_right
    go from None to some Lane, or some Lane to None, there needs to be 'add' or 'remove' events for the
    corresponding sides. This handles those events.
    Updates the vehicle orders and defaults the lane change states to the correct behavior (by default,
    enter discretionary only if the left/right lane is in the same road as the current lane).
    This updates the left/right followers, l/r lane, and l/r_lc attributes.
    Args:
        veh: Vehicle object to update
        curlane: the Lane veh is currently on, curlane has the new/ending connections
        curevent: The event (dictionary) triggering the update
    Returns:
        None (Modifies veh attributes in place.)
    """
    # --- left side ---
    if curevent['left'] == 'remove':
        # no left lane anymore: sever the lfol relationship and clear left attributes
        veh.lfol.rlead.remove(veh)
        veh.lfol = None
        veh.l_lc = None
        veh.llane = None
    elif curevent['left'] == 'add':
        newllane = curlane.get_connect_left(curevent['pos'])
        # get the new follower in the new track, using the merge anchor as the search guess
        merge_anchor = newllane.merge_anchors[curevent['left anchor']][0]
        unused, newfol = curlane.leadfol_find(veh, merge_anchor, 'l')
        if veh.lfol is not None:
            veh.lfol.rlead.remove(veh)
        veh.lfol = newfol
        newfol.rlead.append(veh)
        # default lc state: discretionary only when the new lane is on the same road
        if newllane.roadname == curlane.roadname:
            veh.l_lc = 'discretionary'
        else:
            veh.l_lc = None
        veh.llane = newllane
    elif curevent['left'] == 'update':
        # same track but a new lane object: lfol is unchanged, only lane/lc state updated
        newllane = curlane.get_connect_left(curevent['pos'])
        if newllane.roadname == curlane.roadname:
            veh.l_lc = 'discretionary'
        else:
            veh.l_lc = None
        veh.llane = newllane
    # --- right side (mirror image of the left side) ---
    if curevent['right'] == 'remove':
        veh.rfol.llead.remove(veh)
        veh.rfol = None
        veh.r_lc = None
        veh.rlane = None
    elif curevent['right'] == 'add':
        newrlane = curlane.get_connect_right(curevent['pos'])
        merge_anchor = newrlane.merge_anchors[curevent['right anchor']][0]
        unused, newfol = curlane.leadfol_find(veh, merge_anchor, 'r')
        if veh.rfol is not None:
            veh.rfol.llead.remove(veh)
        veh.rfol = newfol
        newfol.llead.append(veh)
        if newrlane.roadname == curlane.roadname:
            veh.r_lc = 'discretionary'
        else:
            veh.r_lc = None
        veh.rlane = newrlane
    elif curevent['right'] == 'update':
        newrlane = curlane.get_connect_right(curevent['pos'])
        if newrlane.roadname == curlane.roadname:
            veh.r_lc = 'discretionary'
        else:
            veh.r_lc = None
        veh.rlane = newrlane
def update_new_lane(veh, oldlane, newlane, timeind):
    """When a vehicle enters a new lane, this updates the lane, road, pos, and lanemem attributes.

    Args:
        veh: Vehicle object to update.
        oldlane: current Lane veh is on.
        newlane: the new Lane the vehicle is transitioning onto.
        timeind: int giving the timestep of the simulation (0 indexed)
    Returns:
        None. Modifies veh in place.
    """
    road_of_new = newlane.roadname
    if veh.road != road_of_new:
        # crossing onto a different road: shift position into the new road's coordinates
        veh.pos = veh.pos - oldlane.roadlen[road_of_new]
        veh.road = road_of_new
    veh.lane = newlane
    veh.lanemem.append((newlane, timeind))
def set_lane_events(veh):
    """Rebuild the lane_events attribute for a Vehicle that just entered a new lane.

    Refer to update_lane_events for a description of lane events. Only events strictly
    ahead of the vehicle's current position are kept; lane events that are already passed
    are never applied (in contrast to route events, where past events ARE applied).

    Args:
        veh: Vehicle to be updated
    Returns:
        None (Modifies veh in place.)
    """
    veh.lane_events = [event for event in veh.lane.events if event['pos'] > veh.pos]
def update_route_events(veh, timeind):
    """Apply the vehicle's next route event if it has been reached, returning whether it was applied.

    route_events are a list of events which handle any lane changing behavior related to a
    vehicle's route, i.e. they ensure that the vehicle follows its route. Each event is a
    dictionary with keys:
        'pos': the float position the event occurs (relative to the vehicle's current lane).
        'event': 'end discretionary' or 'mandatory', which end discretionary or start
            mandatory lane changing states
        'side': 'l_lc' or 'r_lc', the side which is updated by the event
        'lc_urgency': only for a 'mandatory' event, a tuple giving the positions for 0% and
            100% forced cooperation

    Args:
        veh: Vehicle object to update
        timeind: int giving the timestep of the simulation (0 indexed)
    Returns:
        bool: True if an event was applied, False otherwise.
    """
    if not veh.route_events:
        return False
    event = veh.route_events[0]
    if veh.pos <= event['pos']:
        return False
    if event['event'] == 'end discretionary':
        setattr(veh, event['side'], None)
        veh.update_lc_state(timeind)
    elif event['event'] == 'mandatory':
        setattr(veh, event['side'], 'mandatory')
        # urgency must always be set for mandatory changes
        veh.lc_urgency = event['lc_urgency']
        veh.update_lc_state(timeind)
    veh.route_events.pop(0)
    return True
def make_cur_route(p, curlane, nextroadname):
    """Creates cur_route attribute (stores route events) for Vehicle after entering a new lane.
    Refer to update_route_events for a description of route events.
    Upon entering a new road, we create a cur_route which stores the list of route events for several lanes,
    specifically the lanes we will ultimately end up on, as well as all lanes which we will need to cross
    to reach those lanes we want to be on. We do not create the routes for every single lane on a road.
    Roads have a connect_to attribute whose keys are road names and value is a tuple of:
        pos: for 'continue' change_type, a float which gives the position that the current road
            changes to the desired road.
            for 'merge' type, a tuple of the first position that changing into the desired road
            becomes possible, and the last position where it is still possible to change into that road.
        change_type: if 'continue', this corresponds to the case where the current road turns into
            the next road in the route; the vehicle still needs to make sure it is in the right lane
            (different lanes may transition to different roads).
            if 'merge', this is the situation where the vehicle needs to change lanes onto a different
            road. Thus in the 'merge' case, after completing its lane change the vehicle is on the next
            desired road, in contrast to the continue case where the vehicle actually needs to reach
            the end of the lane in order to transition.
        laneind: if 'continue', a tuple of 2 ints, giving the leftmost and rightmost lanes which will
            continue to the desired lane. if 'merge', the laneind of the lane we need to be on to merge.
        side: for 'merge' type only, gives whether we want to do a left or right change upon reaching
            laneind ('l_lc' or 'r_lc')
        nextroad: desired road
    Explanation of current route model -
        suppose you need to be in lane '2' by position 'x' and start in lane '1', then starting:
        at x - 2*p[0] - 2*p[1] you will end discretionary changing into lane '0'
        at x - p[0] - p[1] you will begin mandatory changing into lane '2'
        at x - p[0] your mandatory change will have urgency of 100% which will always force
        cooperation of your l/rfol (assuming you have cooperation added to your lc model)
        for lane changing with a merge/diverge (e.g. on/off-ramp) which begins at 'x' and ends at 'y',
        you will start mandatory at 'x' always, reaching 100% cooperation by 'y' - p[0]
    Args:
        p: parameters, length 2 list of floats, where p[0] is a safety buffer for merging and p[1]
            is a comfortable distance for merging
        curlane: Lane object to create route events for
        nextroadname: str name of the next road in the route (the next road you want to be on after
            leaving curlane's road)
    Returns:
        cur_route: dictionary where keys are lanes, value is a list of route event dictionaries which
            defines the route a vehicle with parameters p needs to take on that lane
    """
    # TODO refactor route code, including how to implement diverges
    # TODO we only get the route for the current road - no look ahead to take into account
    # future roads. This modification may be needed if roads are short.
    # Should only have to look forward one road at a time.
    # TODO handle cases where LC cannot be completed successfully (put necessary info into cur_route dict)
    # if you fail to follow your route, you need to be given a new route.
    # the code which makes a new route can also be used if route = None when creating a vehicle
    # would need to know latest point when change can take place ('pos' for 'continue' type
    # or pos[1] for merge type)
    # in lane changing model, it would need to check if we are getting too close and act accordingly
    # (e.g. slow down) if needed. in this function, would need to add events if you miss the change,
    # and in that case you would need to be given a new route.
    # another option other simulators use is they simply remove a vehicle if it fails to follow its route.
    curroad = curlane.road
    curlaneind = curlane.laneind
    # position or tuple of positions, str, tuple of 2 ints or single int, str, dict for the next road
    try:  # band aid for case when vehicles cannot follow their planned route
        pos, change_type, laneind, side, nextroad = curlane.connections[nextroadname][:]  # nextroad unused?
        # roads also have name, len, num_lanes, index lanes using their lane index (0 - num_lanes-1)
    except Exception:  # was a bare except; narrowed so SystemExit/KeyboardInterrupt propagate
        print(' vehicle on '+str(curlane)+' missed route which planned for going to '+nextroadname)
        return {i: [] for i in curroad.lanes}
    cur_route = {}
    if change_type == 'continue':  # -> vehicle needs to reach end of lane
        # initialize for lanes which vehicle needs to continue on
        leftind, rightind = laneind[:] if len(laneind) == 2 else (laneind[0], laneind[0])
        for i in range(leftind, rightind+1):
            cur_route[curroad[i]] = []
        # TODO no mandatory here? seems like a bug
        if leftind > 0:
            templane = curroad[leftind]
            curpos = min(templane.end, curroad[leftind-1].end)  # check case where templane.start > curpos?
            # see todo on make_route_helper for edge case
            cur_route[templane].append({'pos': curpos - p[0] - p[1],
                                        'event': 'end discretionary', 'side': 'l_lc'})
        if rightind < curroad.num_lanes-1:
            templane = curroad[rightind]
            curpos = min(templane.end, curroad[rightind+1].end)
            cur_route[templane].append({'pos': curpos - p[0] - p[1],
                                        'event': 'end discretionary', 'side': 'r_lc'})
        if curlaneind >= leftind and curlaneind <= rightind:  # if on correct lane already, do no more work
            return cur_route
        elif curlaneind < laneind[0]:  # need to change right possibly multiple times
            uselaneind = laneind[0]
        else:
            uselaneind = laneind[1]
        cur_route = make_route_helper(p, cur_route, curroad, curlaneind, uselaneind, curroad[uselaneind].end)
    elif change_type == 'merge':  # logic is similar and also uses make_route_helper
        templane = curroad[laneind]
        pos, endpos = pos[:]
        cur_route[templane] = []
        # determine end discretionary event if necessary (on the side opposite to the merge)
        if side == 'l_lc':
            if laneind < curroad.num_lanes-1:
                enddisc = min(pos, curroad[laneind+1].end)
                cur_route[templane].append({'pos': enddisc - p[0] - p[1],
                                            'event': 'end discretionary', 'side': 'r_lc'})
        else:
            if laneind > 0:
                enddisc = min(pos, curroad[laneind-1].end)
                cur_route[templane].append({'pos': enddisc - p[0] - p[1],
                                            'event': 'end discretionary', 'side': 'l_lc'})
        cur_route[templane].append({'pos': pos, 'event': 'mandatory', 'side': side,
                                    'lc_urgency': [pos, endpos - p[0]]})
        if curlaneind != laneind:
            cur_route = make_route_helper(p, cur_route, curroad, curlaneind, laneind, pos)
    return cur_route
def make_route_helper(p, cur_route, curroad, curlaneind, laneind, curpos):
    """Generates list of route events for all lanes with indexes [curlaneind, laneind).
    Starting on curroad in lane with index curlaneind, wanting to be in lane index laneind by position
    curpos, generates route events for all lanes in [curlaneind, laneind). If curlaneind < laneind,
    starts at laneind - 1, moving to the left until routes on all lanes are defined. Similarly for
    curlaneind > laneind. Assumes we already have the route for laneind in cur_route.

    The original implementation duplicated the whole loop for the left and right directions (and
    carried a stale `nexttemplane` between iterations); both directions are now handled by a single
    loop parameterized by the iteration direction.

    Edge cases where lanes have different lengths are handled, but we assume that all lanes are
    connected when they exist. E.g. for a road with 2 lanes, lane0 and lane1, you could have:
        lane0.start = 0, lane0.end = 1000
        lane1.start = 500, lane1.end = 1500,
        lane0.connect_right = [(0, None), (lane1.start, lane1)]
        lane1.connect_left = [(lane1.start, lane0)]
    This configuration would work. But if:
        lane1.connect_left = [(lane1.start, None), (800, lane0)]
    this case is not currently handled, because the current code does not look at the
    connect_left/right, it just uses the .start, .end.

    Args:
        p: parameters, length 2 list of floats, where p[0] is a safety buffer for merging and p[1]
            is a comfortable distance for merging
        cur_route: dictionary where keys are lanes, value is a list of route event dictionaries which
            defines the route a vehicle with parameters p needs to take on that lane
        curroad: road that the route is being generated for
        curlaneind: index of the lane that the vehicle starts in
        laneind: index of the lane that we want to be in by position curpos
        curpos: we want to be in lane with index laneind by curpos
    Returns:
        cur_route: updates cur_route in place (and returns it)
    """
    # TODO decide if we want to handle the case explained in docstring, by checking the
    # connect_left/right to get the positions of connection. These values could then be used instead
    # of .start and .end. Those connections could also be stored in road or lane, so they don't have
    # to be recomputed during simulation.
    if curlaneind == laneind:
        return cur_route
    if curlaneind < laneind:
        # vehicle starts left of the target: fill lanes to the left of the target, changing right
        step = -1
        mand_side, disc_side = 'r_lc', 'l_lc'
    else:
        # vehicle starts right of the target: fill lanes to the right of the target, changing left
        step = 1
        mand_side, disc_side = 'l_lc', 'r_lc'
    prevtemplane = curroad[laneind]
    # walk from the lane adjacent to the target out to (and including) the starting lane
    for curind in range(laneind + step, curlaneind + step, step):
        templane = curroad[curind]
        cur_route[templane] = []
        # determine where the mandatory change starts on templane (note curpos accumulates
        # across iterations, moving the deadline earlier for each additional change needed)
        if templane.end < curpos:  # in case templane ends before curpos
            curpos = templane.end
        curpos += -p[0] - p[1]
        curpos = max(prevtemplane.start, curpos)  # in case the lane doesn't exist at curpos
        # end discretionary only if there is a further lane to end the discretionary into
        nextind = curind + step
        if 0 <= nextind <= curroad.num_lanes - 1:
            nexttemplane = curroad[nextind]
            enddiscpos = min(curpos, nexttemplane.end) - p[0] - p[1]
            cur_route[templane].append({'pos': enddiscpos, 'event': 'end discretionary',
                                        'side': disc_side})
        # there is always a mandatory event
        cur_route[templane].append({'pos': curpos, 'event': 'mandatory', 'side': mand_side,
                                    'lc_urgency': [curpos, curpos + p[1]]})
        prevtemplane = templane
    return cur_route
def set_route_events(veh, timeind):
    """When a vehicle enters a new lane, this function generates all its route events for that lane.
    Every Lane has a list of 'route events' defined for it, which ensure that the Vehicle follows its
    specified route. Refer to update_route_events for a description of route events, and make_cur_route
    for a description of the route model.
    If a vehicle enters a new road, this function will generate the cur_route for that road and a subset
    of its lanes. This function will pop from the vehicle's route when that occurs. The exception to this
    is when vehicles are first initialized: the initialize method of Vehicle creates the first
    cur_route, and therefore pops from the route the first time.
    If a vehicle enters a new lane on the same road, it will either get the existing route
    from cur_route, or if the route for the new lane does not exist, it will create it and add the
    key/value to cur_route. When creating a route for a new lane on the same road, it uses
    make_route_helper.
    Args:
        veh: Vehicle object which we will set its current route_events for.
        timeind: int giving the timestep of the simulation (0 indexed)
    Returns:
        None. Modifies veh attributes in place (route_events, cur_route, possibly applies route events).
    """
    # get new route events if they are stored in memory already
    newlane = veh.lane
    if newlane in veh.cur_route:
        veh.route_events = veh.cur_route[newlane].copy()  # route_events store current route events, cur_route
        # stores all route events for subset of lanes on current road
    else:
        p = veh.route_parameters
        prevlane = veh.lanemem[-2][0]  # the lane the vehicle was on before this one
        if prevlane.road is newlane.road:  # on same road - use helper function to update cur_route
            # need to figure out what situation we are in to give make route helper right call;
            # curpos = deadline by which the vehicle must be on prevlane (reconstructed from
            # prevlane's first route event)
            prevlane_events = veh.cur_route[prevlane]
            if not prevlane_events:  # this can only happen for continue event => curpos = end of lane
                curpos = prevlane.end
            elif prevlane_events[0]['event'] == 'end discretionary':
                curpos = prevlane_events[0]['pos'] + p[0] + p[1]
            else:  # mandatory event
                curpos = prevlane_events[0]['pos']
            make_route_helper(p, veh.cur_route, veh.lane.road, newlane.laneind, prevlane.laneind, curpos)
        else:  # on new road - we need to generate new cur_route and update the vehicle's route
            veh.cur_route = make_cur_route(p, newlane, veh.route.pop(0))
        veh.route_events = veh.cur_route[newlane].copy()
    # for route events, past events need to be applied (keep applying until the next event is ahead)
    curbool = True
    while curbool:
        curbool = update_route_events(veh, timeind)
def update_merge_anchors(curlane, lc_actions):
    """Updates merge_anchors attribute for curlane.
    Lanes have lists of merge anchors, they are used as guesses for leadfol_find for 'new lane' or
    'update lanes' events when a left or right lane is added. Thus, merge anchors are used to ensure
    the leader/follower relationships are updated correctly when the network topology changes.
    A merge anchor is defined as a (vehicle, position) tuple. vehicle can be either an anchor or normal
    vehicle. position can be either None or a float position. If position is None, vehicle is an anchor,
    and does not need to be updated. Otherwise, position is a float of the position on curlane,
    and the merge anchor is the vehicle on the same track as curlane which is closest to position without
    yet passing position.
    Merge anchors have fixed index.
    position being None corresponds to the situation where a new lane starts.
    position being a float corresponds to the situation where two lanes initially meet.
    Unlike lfol/rfol, merge anchors do not need to be completely updated. They should be kept
    in the same track as curlane however.
    Args:
        curlane: Lane object to update
        lc_actions: dictionary with keys as vehicles which request lane changes in the current timestep,
            values are a string either 'l' or 'r' which indicates the side of the change
    Returns:
        None. Modifies merge_anchors attribute for curlane
    """
    for i in range(len(curlane.merge_anchors)):
        veh, pos = curlane.merge_anchors[i][:]
        if pos is None:  # merge anchor is always an anchor, we do nothing
            # no need to update lrfol for anchors
            pass
        else:
            # veh.cf_parameters is None identifies an anchor vehicle (not a real vehicle)
            if veh.cf_parameters is None:
                # veh is an anchor -> we see if we can make its leader the new merge anchor
                lead = veh.lead
                if lead is not None:
                    temp = curlane.roadlen[lead.road] + lead.pos  # lead pos in curlane coordinates
                    if temp - pos < 0:  # lead has not yet passed pos -> it is a better anchor
                        curlane.merge_anchors[i][0] = lead
            elif veh in lc_actions:
                # veh changed lanes this timestep, so it leaves this track; replace it with its
                # follower on the side it came from (which stays on the track)
                if lc_actions[veh] == 'l':
                    curlane.merge_anchors[i][0] = veh.rfol
                else:
                    curlane.merge_anchors[i][0] = veh.lfol
            elif curlane.roadlen[veh.road]+veh.pos - pos > 0:
                # veh passed pos -> fall back to its follower
                curlane.merge_anchors[i][0] = veh.fol
7da5c3c0c6f091edf50f83a961a11dfe879dabc8 | 3,167 | py | Python | src_2d/preprocessing/compute_ncc_images.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 19 | 2020-07-14T02:23:58.000Z | 2022-03-15T12:22:49.000Z | src_2d/preprocessing/compute_ncc_images.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 4 | 2020-09-25T22:42:40.000Z | 2021-08-25T15:03:29.000Z | src_2d/preprocessing/compute_ncc_images.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 7 | 2020-08-29T15:46:13.000Z | 2021-07-16T01:51:28.000Z | import os
import glob
from PIL import Image
import numpy as np
from scipy import stats
import tensorflow as tf
from core import losses_2d, utils_2d
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
def _load_image(name, dtype=np.float32):
    """Open the image file at path `name` with PIL and return it as a numpy array of `dtype`."""
    return np.asarray(Image.open(name), dtype)
def _process_image(data):
data_norm = stats.zscore(data, axis=None, ddof=1)
return np.expand_dims(np.expand_dims(data_norm, -1), 0)
def _process_label(data, intensities=(0, 255)):
n_class = len(intensities)
label = np.zeros((np.hstack((data.shape, n_class))), dtype=np.float32)
for k in range(1, n_class):
label[..., k] = (data == intensities[k])
label[..., 0] = np.logical_not(np.sum(label[..., 1:], axis=-1))
return np.expand_dims(label, 0)
def _grayscale(image):
return ((image - image.min()) / image.ptp() * 255).astype(np.uint8)
if __name__ == '__main__':
    # Script: for every image/label slice pair in data_path, build per-class local NCC maps
    # between the image gradient and each label-class gradient, and save them as .npy files.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    data_path = '../../../../../../dataset/C0T2LGE/label_center_data/training/image_slices'
    import time
    os.chdir(data_path)
    print(os.getcwd())
    image_names = glob.glob('*image.png')
    image_suffix = 'image.png'
    label_suffix = 'label.png'
    save_path = './ncc_images'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # pixel intensities encoding the label classes (background + 3 structures)
    label_intensities = (0, 85, 212, 255)
    # TF1-style placeholders; all slices are assumed to be 144x144 — TODO confirm
    image_tensor = tf.placeholder(tf.float32, [1, 144, 144, 1])
    label_tensor = tf.placeholder(tf.float32, [1, 144, 144, len(label_intensities)])
    # compute gradient image of intensity and label data
    label_grad = utils_2d.compute_gradnorm_from_volume(label_tensor)
    image_grad = tf.reduce_sum(utils_2d.compute_gradnorm_from_volume(image_tensor), axis=-1, keepdims=True)
    # compute local normalized cross-correlation maps from gradient images
    # (one NCC channel per label class; exp maps correlations to positive weights)
    NCC = losses_2d.CrossCorrelation(win=7)
    ncc_tensor = tf.exp(tf.concat([NCC.ncc(image_grad, label_grad[..., i, None])
                                   for i in range(len(label_intensities))], axis=-1))
    with tf.Session(config=config) as sess:
        for name in image_names:
            print(name)
            time_start = time.time()
            image = _load_image(name)
            label = _load_image(name.replace(image_suffix, label_suffix))
            # pre-processing: z-score normalize image, one-hot encode label
            image_data = _process_image(image)
            label_data = _process_label(label, label_intensities)
            ncc = sess.run(ncc_tensor, feed_dict={image_tensor: image_data,
                                                  label_tensor: label_data})
            # for i in range(len(label_intensities)):
            #     ncc_img = Image.fromarray(_grayscale(ncc[0, ..., i]))
            #     ncc_img.show()
            # fraction of pixels with positive correlation (exp(ncc) > 1 <=> ncc > 0)
            print("NCC percentage: %.4f" % (np.sum(ncc > 1) / np.prod(ncc.shape)))
            np.save(os.path.join(save_path, name.replace(image_suffix, 'ncc.npy')), ncc.squeeze(0))
            time_end = time.time()
            print("Elapsing time: %s" % (time_end - time_start))
| 33.336842 | 108 | 0.620145 |
4d6c0c629071e44db431d638a7cc7e7a92cee1c6 | 665 | py | Python | migration/versions/de2ba0a9b704_add_language.py | floresmatthew/sahasrahbot | a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4 | [
"MIT"
] | null | null | null | migration/versions/de2ba0a9b704_add_language.py | floresmatthew/sahasrahbot | a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4 | [
"MIT"
] | null | null | null | migration/versions/de2ba0a9b704_add_language.py | floresmatthew/sahasrahbot | a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4 | [
"MIT"
] | null | null | null | """add language
Revision ID: de2ba0a9b704
Revises: b71aea760df1
Create Date: 2021-04-25 15:16:50.654227
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'de2ba0a9b704'
down_revision = 'b71aea760df1'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable `lang` varchar(20) column to `tournaments`."""
    lang_column = sa.Column('lang', sa.String(length=20), nullable=True)
    op.add_column('tournaments', lang_column)
def downgrade():
    """Revert the migration: drop the `lang` column from `tournaments`."""
    op.drop_column('tournaments', 'lang')
| 22.931034 | 88 | 0.694737 |
d80b6ebc8d305846de3c746b7a3f83ed4cded0c9 | 862 | py | Python | botoy/decorators/_startswith.py | yuban10703/botoy | 892a170caea08362c22f1d909545a8c7a962ba3b | [
"MIT"
] | 32 | 2020-10-11T15:18:59.000Z | 2021-04-11T10:39:07.000Z | botoy/decorators/_startswith.py | yuban10703/botoy | 892a170caea08362c22f1d909545a8c7a962ba3b | [
"MIT"
] | 17 | 2020-10-12T15:56:19.000Z | 2021-04-03T01:53:05.000Z | botoy/decorators/_startswith.py | yuban10703/botoy | 892a170caea08362c22f1d909545a8c7a962ba3b | [
"MIT"
] | 7 | 2020-10-12T23:55:57.000Z | 2021-04-15T16:14:46.000Z | from ..model import FriendMsg, GroupMsg
from ..parser import friend as fp
from ..parser import group as gp
def startswith(string: str):
"""Content以指定前缀开头 GroupMsg, FriendMsg
:param string: 前缀字符串, 会解析图片消息的Content
"""
def deco(func):
def inner(ctx):
assert isinstance(ctx, (GroupMsg, FriendMsg))
if isinstance(ctx, GroupMsg):
pic_data = gp.pic(ctx)
else:
pic_data = fp.pic(ctx)
if pic_data is not None:
content = pic_data.Content
else:
content = ctx.Content
# 这里的content按理永远不可能为None,但就是碰到了这种情况,startswith用得比较多
# 所以先在这里增加一步判断
if content is not None and content.startswith(string):
return func(ctx)
return None
return inner
return deco
| 27.806452 | 66 | 0.571926 |
3f184ff0f70ad542a5dc2691c58f7f083cfe5dcd | 1,317 | py | Python | tests/test_designer.py | Kandongwe/RunestoneServer | f555868521b3717beec0ec42dbcbcb443c64686c | [
"MIT"
] | 344 | 2015-10-15T20:03:57.000Z | 2022-03-25T18:37:42.000Z | tests/test_designer.py | Kandongwe/RunestoneServer | f555868521b3717beec0ec42dbcbcb443c64686c | [
"MIT"
] | 1,170 | 2015-10-15T08:17:03.000Z | 2022-03-28T14:32:47.000Z | tests/test_designer.py | Kandongwe/RunestoneServer | f555868521b3717beec0ec42dbcbcb443c64686c | [
"MIT"
] | 278 | 2015-10-12T23:44:38.000Z | 2022-03-19T09:22:47.000Z | def test_build(test_client, test_user_1, runestone_db_tools):
test_user_1.make_instructor()
test_user_1.login()
test_client.validate(
"designer/build",
"build_course_1",
data=dict(
coursetype=test_user_1.course.course_name,
institution="Runestone",
startdate="01/01/2019",
python3="T",
login_required="T",
instructor="T",
projectname="build_course_1",
projectdescription="Build a course",
),
)
db = runestone_db_tools.db
res = db(db.courses.course_name == "build_course_1").select().first()
assert res.institution == "Runestone"
assert res.base_course == test_user_1.course.course_name
# Now delete it
test_client.validate("admin/deletecourse", "About Runestone")
res = db(db.courses.course_name == "build_course_1").select().first()
assert not res
# deleting the course invalidates the session
test_user_1.login()
test_client.validate(
"designer/build",
"build_course_2",
data=dict(
coursetype=test_user_1.course.course_name,
instructor="T",
startdate="",
projectname="build_course_2",
projectdescription="Build a course",
),
)
| 30.627907 | 73 | 0.611997 |
24c2ae1139c62ce7f25e0a97430c14866ff24e75 | 7,383 | py | Python | experiments/segnet.py | xboix/insideness | cf5c349b75eedfc993f91b12f578bfda4bbce989 | [
"MIT"
] | null | null | null | experiments/segnet.py | xboix/insideness | cf5c349b75eedfc993f91b12f578bfda4bbce989 | [
"MIT"
] | null | null | null | experiments/segnet.py | xboix/insideness | cf5c349b75eedfc993f91b12f578bfda4bbce989 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import datasets
import copy
import pickle
class DNN(object):
def __init__(self):
self.name = "MLP1"
self.pretrained = False
self.version = 1
self.layers = 2
self.stride = 2
self.c = 1
self.n_t = 1
class Hyperparameters(object):
def __init__(self):
self.batch_size = 128
self.learning_rate = 1e-2
self.num_epochs_per_decay = 1.0
self.learning_rate_factor_per_decay = 1
self.weight_decay = 0
self.max_num_epochs = 60
self.drop_train = 1
self.drop_test = 0
self.momentum = 0.9
self.init_factor = 1
self.alpha = 0.1
self.augmentation = False
class Experiments(object):
def __init__(self, id, name, dataset, output_path, family_id, family_name):
self.name = "base"
self.log_dir_base = output_path
# Recordings
self.max_to_keep_checkpoints = 2
# Test after training:
self.skip_train = False
# Start from scratch even if it existed
self.restart = False
# Skip running experiments
self.skip = False
# Save extense summary
self.extense_summary = True
# Add ID to name:
self.ID = id
self.name = 'ID' + str(self.ID) + "_" + name
self.family_ID = family_id
self.family_name = family_name
# Add additional descriptors to Experiments
self.dataset = dataset
self.dnn = DNN()
self.hyper = Hyperparameters()
def generate_experiments_dataset(opt_data):
return Experiments(opt_data.ID, opt_data.name, opt_data, opt_data.log_dir_base, 0, 'data')
def change_dataset(opt, opt_data):
opt.dataset = opt_data
def get_experiments(output_path):
opt_data = datasets.get_datasets(output_path)
# # #
# Create set of experiments
opt = []
idx_base = 0
opt_handle = Experiments(id=idx_base, name="Coloring", dataset=opt_data[49], output_path=output_path,
family_id=0, family_name="Coloring_Optimal")
opt_handle.skip_train = True
opt_handle.dnn.name = "Crossing"
opt_handle.dnn.n_t = 30
opt += [copy.deepcopy(opt_handle)]
idx_base += 1
idx_family = 1
for idx_dataset in range(40, 50):
for c in [64]:
for l in [1, 2, 3]:
for conv_steps in [1, 2]:
for alpha in [0.1, 0.2, 0.4]:
for batch in [32, 256]:
for lr in [1e0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
opt_handle = Experiments(id=idx_base, name="SegNet_D" + str(idx_dataset),
dataset=opt_data[idx_dataset], output_path=output_path,
family_id=idx_family, family_name="SegNet_D" + str(idx_dataset))
opt_handle.dnn.name = "Segnet"
opt_handle.hyper.max_num_epochs = 30
opt_handle.dnn.base_channels = c
opt_handle.dnn.num_poolings = l
opt_handle.dnn.num_convolutions_step = conv_steps
opt_handle.hyper.learning_rate = lr
opt_handle.hyper.alpha = alpha
opt_handle.hyper.batch_size = batch
opt += [copy.deepcopy(opt_handle)]
idx_base += 1
idx_family += 1
for idx_dataset in [50]:
for c in [64]:
for l in [1, 2, 3]:
for conv_steps in [1, 2]:
for alpha in [0.1, 0.2, 0.4]:
for batch in [32, 256]:
for lr in [1e0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
opt_handle = Experiments(id=idx_base, name="SegNet_D" + str(idx_dataset),
dataset=opt_data[idx_dataset], output_path=output_path,
family_id=idx_family, family_name="SegNet_D" + str(idx_dataset))
opt_handle.dnn.name = "Segnet"
opt_handle.hyper.max_num_epochs = 30
opt_handle.dnn.base_channels = c
opt_handle.dnn.num_poolings = l
opt_handle.dnn.num_convolutions_step = conv_steps
opt_handle.hyper.learning_rate = lr
opt_handle.hyper.alpha = alpha
opt_handle.hyper.batch_size = batch
opt += [copy.deepcopy(opt_handle)]
idx_base += 1
idx_family += 1
for idx_dataset in [52]:
for c in [64]:
for l in [1, 2, 3]:
for conv_steps in [1, 2]:
for alpha in [0.1, 0.2, 0.4]:
for batch in [32, 256]:
for lr in [1e0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
opt_handle = Experiments(id=idx_base, name="SegNet_D" + str(idx_dataset),
dataset=opt_data[idx_dataset], output_path=output_path,
family_id=idx_family, family_name="SegNet_D" + str(idx_dataset))
opt_handle.dnn.name = "Segnet"
opt_handle.hyper.max_num_epochs = 30
opt_handle.dnn.base_channels = c
opt_handle.dnn.num_poolings = l
opt_handle.dnn.num_convolutions_step = conv_steps
opt_handle.hyper.learning_rate = lr
opt_handle.hyper.alpha = alpha
opt_handle.hyper.batch_size = batch
opt += [copy.deepcopy(opt_handle)]
idx_base += 1
idx_family += 1
return opt
def get_best_of_the_family(output_path):
opt_pre_cossval = get_experiments(output_path)
with open(output_path + 'selected_models.pkl', 'rb') as f:
cross = pickle.load(f)
opt =[]
for k in range(1, cross['num_families']+1):
if not k in cross:
continue
print(cross[k]['ID'])
opt_handle = opt_pre_cossval[int(cross[k]['ID'])]
opt += [copy.deepcopy(opt_handle)]
return opt
def get_experiments_selected(output_path):
NUM_TRIALS = 100
opt_pre_cossval = get_experiments(output_path)
with open(output_path + 'selected_models.pkl', 'rb') as f:
cross = pickle.load(f)
idx = 0
opt = []
for k in range(1, cross['num_families']+1):
if not k in cross:
continue
for trial in range(NUM_TRIALS):
#print(cross[k]['ID'])
opt_handle = opt_pre_cossval[int(cross[k]['ID'])]
opt_handle.ID = idx
opt_handle.name = 'ID' + str(opt_handle.ID) + "_FINAL" + str(trial) + "_" + opt_handle.family_name
idx += 1
opt += [copy.deepcopy(opt_handle)]
return opt
| 33.107623 | 112 | 0.507924 |
2cd7cd3dcf8305c7ffbd131beffa01ac7eb3c1ae | 19,792 | py | Python | nova/tests/api/openstack/compute/contrib/test_keypairs.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_keypairs.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_keypairs.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | [
"Apache-2.0"
] | 1 | 2020-07-24T08:49:47.000Z | 2020-07-24T08:49:47.000Z | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import policy
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
QUOTAS = quota.QUOTAS
def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return keypair
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
self.Controller = keypairs.Controller()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self.app = fakes.wsgi_app(init_only=('os-keypairs',))
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
'Keypair name must be between 1 and 255 characters long',
res_dict['badRequest']['message'])
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
'Keypair name must be between 1 and 255 characters long',
res_dict['badRequest']['message'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Keypair name contains unsafe characters",
res_dict['badRequest']['message'])
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
# FIXME(ja): sholud we check that public_key was sent to create?
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertFalse('private_key' in res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual("Keypair data is invalid",
res_dict['badRequest']['message'])
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
def test_keypair_get_keypair_not_found(self):
req = webob.Request.blank('/v2/fake/os-keypairs/DOESNOTEXIST')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return {'name': 'foo', 'public_key': 'XXX', 'fingerprint': 'YYY'}
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank('/v2/fake/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(fakes.wsgi_app(init_only=('servers',)))
self.assertEquals(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertTrue('key_name' in res_dict['server'])
self.assertEquals(res_dict['server']['key_name'], '')
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEquals(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertTrue('key_name' in server_dict)
self.assertEquals(server_dict['key_name'], '')
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
req = webob.Request.blank('/v1.1/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['message'],
"Invalid request body")
class KeypairPolicyTest(test.TestCase):
def setUp(self):
super(KeypairPolicyTest, self).setUp()
self.KeyPairController = keypairs.KeypairController()
def _db_key_pair_get(context, user_id, name):
return {'name': 'foo', 'public_key': 'XXX', 'fingerprint': 'YYY'}
self.stubs.Set(db, "key_pair_get",
_db_key_pair_get)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list_fail_policy(self):
rules = policy.Rules({'compute_extension:keypairs:index':
policy.parse_rule('role:admin')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
self.assertRaises(exception.NotAuthorized,
self.KeyPairController.index,
req)
def test_keypair_list_pass_policy(self):
rules = policy.Rules({'compute_extension:keypairs:index':
policy.parse_rule('')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
res = self.KeyPairController.index(req)
self.assertTrue('keypairs' in res)
def test_keypair_show_fail_policy(self):
rules = policy.Rules({'compute_extension:keypairs:show':
policy.parse_rule('role:admin')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
self.assertRaises(exception.NotAuthorized,
self.KeyPairController.show,
req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = policy.Rules({'compute_extension:keypairs:show':
policy.parse_rule('')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
res = self.KeyPairController.show(req, 'FAKE')
self.assertTrue('keypair' in res)
def test_keypair_create_fail_policy(self):
rules = policy.Rules({'compute_extension:keypairs:create':
policy.parse_rule('role:admin')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
req.method = 'POST'
self.assertRaises(exception.NotAuthorized,
self.KeyPairController.create,
req, {})
def test_keypair_create_pass_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = policy.Rules({'compute_extension:keypairs:create':
policy.parse_rule('')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs')
req.method = 'POST'
res = self.KeyPairController.create(req, body)
self.assertTrue('keypair' in res)
def test_keypair_delete_fail_policy(self):
rules = policy.Rules({'compute_extension:keypairs:delete':
policy.parse_rule('role:admin')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
self.assertRaises(exception.NotAuthorized,
self.KeyPairController.delete,
req, 'FAKE')
def test_keypair_delete_pass_policy(self):
rules = policy.Rules({'compute_extension:keypairs:delete':
policy.parse_rule('')})
policy.set_rules(rules)
req = fakes.HTTPRequest.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
res = self.KeyPairController.delete(req, 'FAKE')
self.assertEqual(res.status_int, 202)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
| 39.584 | 78 | 0.606811 |
750b2aab527161684fd957da144413d1b0440e9c | 1,703 | py | Python | setup.py | Veritaris/fastapi_contrib | 081670603917b1b7e9646c75fba5614b09823a3e | [
"MIT"
] | null | null | null | setup.py | Veritaris/fastapi_contrib | 081670603917b1b7e9646c75fba5614b09823a3e | [
"MIT"
] | null | null | null | setup.py | Veritaris/fastapi_contrib | 081670603917b1b7e9646c75fba5614b09823a3e | [
"MIT"
] | 1 | 2021-05-16T11:53:14.000Z | 2021-05-16T11:53:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
requirements = [
'fastapi>=0.52.0',
'pydantic>=1.4',
'contextvars;python_version<"3.7"'
]
setup_requirements = ["pytest-runner"]
test_requirements = ["pytest"]
setup(
author="Lev Rubel",
author_email="l@datacorp.ee",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Opinionated set of utilities on top of FastAPI",
install_requires=requirements,
extras_require={
"mongo": ["motor>=2.0.0"],
"ujson": ["ujson<2.0.0"],
"pytz": ["pytz"],
"jaegertracing": ["jaeger-client>=4.1.0", "opentracing>=2.2.0"],
"all": [
"motor>=2.0.0",
"ujson<2.0.0",
"pytz",
"jaeger-client>=4.1.0",
"opentracing>=2.2.0",
],
},
license="MIT license",
long_description=readme,
include_package_data=True,
keywords="fastapi_contrib",
name="fastapi_contrib",
packages=find_packages(exclude=["tests", "tests.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/identixone/fastapi_contrib",
version="0.2.9",
zip_safe=False,
)
| 27.467742 | 72 | 0.591897 |
a381cc4ca26403b63436727355c9e20c50657e1d | 3,377 | py | Python | test1/test1/settings.py | StanislavDanilov/interview | 4b113c6415cc8f18c6d0b18d40949032a8ccd8eb | [
"MIT"
] | null | null | null | test1/test1/settings.py | StanislavDanilov/interview | 4b113c6415cc8f18c6d0b18d40949032a8ccd8eb | [
"MIT"
] | null | null | null | test1/test1/settings.py | StanislavDanilov/interview | 4b113c6415cc8f18c6d0b18d40949032a8ccd8eb | [
"MIT"
] | null | null | null | """
Django settings for test1 project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=47+84p%!+=2gd5uz&(yu-dghs=s6q#d#i26e#%@fk*jz1q8of'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test1.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "Templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.778626 | 91 | 0.702102 |
e4374640e1256126daec6d3096fe1c8caf8f5f45 | 2,006 | py | Python | python/phonenumbers/data/region_SG.py | vishnuku/python-phonenumbers | 6ac2cdd06b7ccf709a8efb21629cf2c5f030e627 | [
"Apache-2.0"
] | 3 | 2018-12-02T23:09:00.000Z | 2018-12-02T23:16:59.000Z | python/phonenumbers/data/region_SG.py | carljm/python-phonenumbers | 494044aaf75443dbfd62b8d1352b441af6a458ae | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/data/region_SG.py | carljm/python-phonenumbers | 494044aaf75443dbfd62b8d1352b441af6a458ae | [
"Apache-2.0"
] | null | null | null | """Auto-generated file, do not edit by hand. SG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SG = PhoneMetadata(id='SG', country_code=65, international_prefix='0[0-3]\\d',
general_desc=PhoneNumberDesc(national_number_pattern='[36]\\d{7}|[17-9]\\d{7,10}', possible_number_pattern='\\d{8,11}', possible_length=(8, 10, 11)),
fixed_line=PhoneNumberDesc(national_number_pattern='6[1-9]\\d{6}', possible_number_pattern='\\d{8}', example_number='61234567', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:8[1-8]|9[0-8])\\d{6}', possible_number_pattern='\\d{8}', example_number='81234567', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='1?800\\d{7}', possible_number_pattern='\\d{10,11}', example_number='18001234567', possible_length=(10, 11)),
premium_rate=PhoneNumberDesc(national_number_pattern='1900\\d{7}', possible_number_pattern='\\d{11}', example_number='19001234567', possible_length=(11,)),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='3[12]\\d{6}', possible_number_pattern='\\d{8}', example_number='31234567', possible_length=(8,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='7000\\d{7}', possible_number_pattern='\\d{11}', example_number='70001234567', possible_length=(11,)),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
number_format=[NumberFormat(pattern='([3689]\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[369]|8[1-9]']),
NumberFormat(pattern='(1[89]00)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1[89]']),
NumberFormat(pattern='(7000)(\\d{4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['70']),
NumberFormat(pattern='(800)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['80'])],
mobile_number_portable_region=True)
| 91.181818 | 163 | 0.707876 |
28ac6bf33812c53153481b37f8bd9d729149f302 | 284 | py | Python | apple/apple/pipelines.py | wutienyang/scrapy_redis_example | 5376ae711908ed29cb837f1623239fd1f758ba6f | [
"MIT"
] | 1 | 2018-05-22T09:36:08.000Z | 2018-05-22T09:36:08.000Z | apple/apple/pipelines.py | wutienyang/scrapy_redis_example | 5376ae711908ed29cb837f1623239fd1f758ba6f | [
"MIT"
] | null | null | null | apple/apple/pipelines.py | wutienyang/scrapy_redis_example | 5376ae711908ed29cb837f1623239fd1f758ba6f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ApplePipeline(object):
    """Pass-through Scrapy item pipeline.

    Performs no transformation or filtering; every scraped item is returned
    unchanged so later pipeline stages (if any) can process it.
    """

    def process_item(self, item, spider):
        # Returning the item keeps it flowing through the pipeline chain.
        return item
8b93aa26ee59445d0fcad7ddd13577ac9cfd4da2 | 4,395 | py | Python | azure-iot-device/samples/async-hub-scenarios/upload_to_blob.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | null | null | null | azure-iot-device/samples/async-hub-scenarios/upload_to_blob.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | null | null | null | azure-iot-device/samples/async-hub-scenarios/upload_to_blob.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | 1 | 2019-12-17T17:50:43.000Z | 2019-12-17T17:50:43.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import uuid
import asyncio
from azure.iot.device.aio import IoTHubDeviceClient, IoTHubModuleClient
from azure.iot.device import X509
import http.client
import pprint
import json
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
"""
Welcome to the Upload to Blob sample. To use this sample you must have azure.storage.blob installed in your python environment.
To do this, you can run:
$ pip isntall azure.storage.blob
This sample covers using the following Device Client APIs:
get_storage_info_for_blob
- used to get relevant information from IoT Hub about a linked Storage Account, including
a hostname, a container name, a blob name, and a sas token. Additionally it returns a correlation_id
which is used in the notify_blob_upload_status, since the correlation_id is IoT Hub's way of marking
which blob you are working on.
notify_blob_upload_status
- used to notify IoT Hub of the status of your blob storage operation. This uses the correlation_id obtained
by the get_storage_info_for_blob task, and will tell IoT Hub to notify any service that might be listening for a notification on the
status of the file upload task.
You can learn more about File Upload with IoT Hub here:
https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-file-upload
"""
# Host is in format "<iothub name>.azure-devices.net"
async def storage_blob(blob_info):
    """Upload a small generated text file to Azure Blob Storage.

    Builds a SAS URL from ``blob_info`` (as returned by IoT Hub's
    ``get_storage_info_for_blob``), writes a "Hello, World!" file under a
    local ``data/`` directory, and uploads it.

    Returns ``(None, upload_result)`` on success, or the raised exception
    object on failure.
    NOTE(review): the success and failure return shapes differ (tuple vs
    bare exception); callers must handle both -- confirm this is intended.
    """
    try:
        print("Azure Blob storage v12 - Python quickstart sample")
        # Assemble the full SAS URL: https://<host>/<container>/<blob><token>.
        # The sasToken value presumably already starts with '?' -- confirm.
        sas_url = "https://{}/{}/{}{}".format(
            blob_info["hostName"],
            blob_info["containerName"],
            blob_info["blobName"],
            blob_info["sasToken"],
        )
        blob_client = BlobClient.from_blob_url(sas_url)
        # Create a file in local Documents directory to upload and download
        local_file_name = "data/quickstart" + str(uuid.uuid4()) + ".txt"
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), local_file_name)
        # Write text to the file
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        # NOTE(review): 'file' shadows the (Python 2) builtin name; a 'with'
        # block would also guarantee closing on error.
        file = open(filename, "w")
        file.write("Hello, World!")
        file.close()
        print("\nUploading to Azure Storage as blob:\n\t" + local_file_name)
        # # Upload the created file
        with open(filename, "rb") as f:
            result = blob_client.upload_blob(f)
        return (None, result)
    except Exception as ex:
        print("Exception:")
        print(ex)
        return ex
async def main():
    """Demonstrate the IoT Hub file-upload flow over an X509 device client.

    Steps: connect, ask IoT Hub for blob-storage credentials, upload a file
    via :func:`storage_blob`, notify IoT Hub of the (hard-coded success)
    status, and disconnect. Credentials come from environment variables:
    IOTHUB_HOSTNAME, IOTHUB_DEVICE_ID, X509_CERT_FILE, X509_KEY_FILE,
    PASS_PHRASE.
    """
    hostname = os.getenv("IOTHUB_HOSTNAME")
    device_id = os.getenv("IOTHUB_DEVICE_ID")
    x509 = X509(
        cert_file=os.getenv("X509_CERT_FILE"),
        key_file=os.getenv("X509_KEY_FILE"),
        pass_phrase=os.getenv("PASS_PHRASE"),
    )
    device_client = IoTHubDeviceClient.create_from_x509_certificate(
        hostname=hostname, device_id=device_id, x509=x509
    )
    # device_client = IoTHubModuleClient.create_from_connection_string(conn_str)
    # Connect the client.
    await device_client.connect()
    # await device_client.get_storage_info_for_blob("fake_device", "fake_method_params")
    # get the storage sas
    blob_name = "fakeBlobName12"
    storage_info = await device_client.get_storage_info_for_blob(blob_name)
    # upload to blob
    # NOTE(review): this raw HTTPS connection is opened and closed without
    # ever issuing a request -- it appears to be dead code; confirm and drop.
    connection = http.client.HTTPSConnection(hostname)
    connection.connect()
    # notify iot hub of blob upload result
    # await device_client.notify_upload_result(storage_blob_result)
    storage_blob_result = await storage_blob(storage_info)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(storage_blob_result)
    connection.close()
    # Status is reported as success (200) unconditionally, even if
    # storage_blob returned an exception -- see NOTE in storage_blob.
    await device_client.notify_blob_upload_status(
        storage_info["correlationId"], True, 200, "fake status description"
    )
    # Finally, disconnect
    await device_client.disconnect()
if __name__ == "__main__":
    # Script entry point: run the async demo to completion.
    asyncio.run(main())
| 36.02459 | 140 | 0.681911 |
a7054291bc5b201f8fb30f1b6f346aed8d07a279 | 610 | py | Python | faces/models.py | royeis/demos | 59dfcd490a7a2f1f730465d347e1fb169ca2d810 | [
"Apache-2.0"
] | null | null | null | faces/models.py | royeis/demos | 59dfcd490a7a2f1f730465d347e1fb169ca2d810 | [
"Apache-2.0"
] | null | null | null | faces/models.py | royeis/demos | 59dfcd490a7a2f1f730465d347e1fb169ca2d810 | [
"Apache-2.0"
] | null | null | null | from torch import nn
class FeedForwardNeuralNetModel(nn.Module):
    """Single-hidden-layer MLP: Linear -> Sigmoid -> Linear.

    The sub-module attribute names (``linearA``, ``sigmoid``, ``linearB``)
    are preserved so checkpoints saved from the original definition still
    load via ``state_dict``.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(FeedForwardNeuralNetModel, self).__init__()
        # Hidden affine map, logistic non-linearity, then output affine map.
        self.linearA = nn.Linear(input_dim, hidden_dim)
        self.sigmoid = nn.Sigmoid()
        self.linearB = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Chain the three sub-modules; the output layer has no activation
        # (raw logits / regression values).
        return self.linearB(self.sigmoid(self.linearA(x)))
| 23.461538 | 58 | 0.616393 |
5b3ee22fcbf7ec204a2e585bc1f45a80e386b20f | 3,434 | py | Python | freeze.py | andrewginns/CycleGAN-Tensorflow-Simple | f0f843a8713823f2198a875b2b7538ea6d3519bd | [
"MIT"
] | null | null | null | freeze.py | andrewginns/CycleGAN-Tensorflow-Simple | f0f843a8713823f2198a875b2b7538ea6d3519bd | [
"MIT"
] | null | null | null | freeze.py | andrewginns/CycleGAN-Tensorflow-Simple | f0f843a8713823f2198a875b2b7538ea6d3519bd | [
"MIT"
] | null | null | null | import os, argparse
import tensorflow as tf
from tensorflow.python.framework import graph_util
dir = os.path.dirname(os.path.realpath(__file__))
# Modified from https://gist.github.com/moodoki/e37a85fb0258b045c005ca3db9cbc7f6
def freeze_graph(model_folder, output_nodes='a2b_generator/output_image',
                 output_filename='frozen-graph.pb',
                 rename_outputs=None):
    """Freeze a TF1 checkpoint into a single constant GraphDef protobuf.

    Args:
        model_folder: directory containing the checkpoint files.
        output_nodes: comma-separated names of the graph's output nodes.
        output_filename: path of the serialized frozen graph to write.
        rename_outputs: optional comma-separated replacement names for the
            output nodes, in the same order as ``output_nodes``.
    """
    #Load checkpoint
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path

    output_graph = output_filename

    #Devices should be cleared to allow Tensorflow to control placement of
    #graph when loading on different machines
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta',
                                       clear_devices=True)
    graph = tf.get_default_graph()

    onames = output_nodes.split(',')

    #https://stackoverflow.com/a/34399966/4190475
    if rename_outputs is not None:
        nnames = rename_outputs.split(',')
        with graph.as_default():
            # Attach identity ops so the frozen graph exposes the new names.
            for o, n in zip(onames, nnames):
                _out = tf.identity(graph.get_tensor_by_name(o+':0'), name=n)
            onames = nnames

    input_graph_def = graph.as_graph_def()

    # fix batch norm nodes
    for node in input_graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            # Bug fix: 'xrange' is Python-2-only and raises NameError on
            # Python 3; 'range' behaves identically here on both versions.
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr: del node.attr['use_locking']

    with tf.Session(graph=graph) as sess:
        saver.restore(sess, input_checkpoint)

        # In production, graph weights no longer need to be updated
        # graph_util provides utility to change all variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def,
            onames # unrelated nodes will be discarded
        )

        # Serialize and write to file
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())

    print("%d ops in the final graph." % len(output_graph_def.node))
# CLI entry point: parse checkpoint/output-node options and run freeze_graph.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Prune and freeze weights from checkpoints into production models')
    parser.add_argument("--checkpoint_path",
                        default='./outputs/checkpoints/summer2winter_yosemite',
                        type=str, help="Path to checkpoint files")
    parser.add_argument("--output_nodes",
                        default='a2b_generator/output_image',
                        type=str, help="Names of output node, comma seperated")
    parser.add_argument("--output_graph",
                        default='/tmp/frozen-graph.pb',
                        type=str, help="Output graph filename")
    parser.add_argument("--rename_outputs",
                        default=None,
                        type=str, help="Rename output nodes for better \
                        readability in production graph, to be specified in \
                        the same order as output_nodes")
    args = parser.parse_args()

    freeze_graph(args.checkpoint_path, args.output_nodes, args.output_graph, args.rename_outputs)
| 39.930233 | 93 | 0.631334 |
55a1ee53950d749f842fb73a2a2025aee4297928 | 155 | py | Python | init.py | VisionNetworkProject/tgbot | a61cfa8d1bd3009dfa80983257bfc3ff734b13c0 | [
"MIT"
] | 1 | 2018-06-28T06:49:58.000Z | 2018-06-28T06:49:58.000Z | init.py | VisionNetworkProject/tgbot | a61cfa8d1bd3009dfa80983257bfc3ff734b13c0 | [
"MIT"
] | null | null | null | init.py | VisionNetworkProject/tgbot | a61cfa8d1bd3009dfa80983257bfc3ff734b13c0 | [
"MIT"
] | null | null | null | # coding:utf-8
from vn_db import *
# One-off database initialization: open the connection exported by vn_db
# (presumably an ORM database handle -- confirm) and create the tables for
# the four models used by the bot.
db.connect()
db.create_tables([Member, Chat, Message, Doorbell])
print('Database: initialization complete!')
| 17.222222 | 52 | 0.703226 |
fd07f936acb29d5e740599ecd33a9ba783c36972 | 2,037 | py | Python | Day50/main.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | Day50/main.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | Day50/main.py | SSRout/100-days-of-code | 7aafa7789a57bf701b60043fa2bf8fb61b64bfb5 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementClickInterceptedException, NoSuchElementException
from time import sleep
# --- Configuration ---------------------------------------------------------
# Fill these in before running. Bug fix: the original assigned bare words
# (e.g. ``FB_EMAIL = YOUR FACEBOOK LOGIN EMAIL``), which is a SyntaxError;
# they are now string placeholders with identical intent.
FB_EMAIL = "YOUR FACEBOOK LOGIN EMAIL"
FB_PASSWORD = "YOUR FACEBOOK PASSWORD"
chrome_driver_path = "YOUR CHROME DRIVER PATH"

driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get("http://www.tinder.com")
sleep(2)

# Open the login dialog and pick the "Log in with Facebook" option.
login_button = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/header/div[1]/div[2]/div/button')
login_button.click()
sleep(2)
fb_login = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[1]/div/div[3]/span/div[2]/button')
fb_login.click()
sleep(2)

# Facebook opens its login form in a second browser window; remember both
# window handles so we can switch back to Tinder after authenticating.
base_window = driver.window_handles[0]
fb_login_window = driver.window_handles[1]
driver.switch_to.window(fb_login_window)
print(driver.title)
email = driver.find_element_by_xpath('//*[@id="email"]')
password = driver.find_element_by_xpath('//*[@id="pass"]')
email.send_keys(FB_EMAIL)
password.send_keys(FB_PASSWORD)
password.send_keys(Keys.ENTER)

driver.switch_to.window(base_window)
print(driver.title)
sleep(5)

# Dismiss the permission pop-ups: location, notifications, cookie banner.
allow_location_button = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]')
allow_location_button.click()
notifications_button = driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[2]')
notifications_button.click()
cookies = driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div/div/div[1]/button')
cookies.click()

# Auto-like up to 100 profiles. When a "It's a Match!" overlay intercepts
# the click, try to close it and continue.
for n in range(100):
    sleep(1)
    try:
        print("called")
        like_button = driver.find_element_by_xpath(
            '//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[4]/button')
        like_button.click()
    except ElementClickInterceptedException:
        try:
            match_popup = driver.find_element_by_css_selector(".itsAMatch a")
            match_popup.click()
        except NoSuchElementException:
            sleep(2)

driver.quit()
2b5711504d8d24e68a622aafe9573face75c148c | 6,306 | py | Python | trafficsignrecognition/correlationfilter/correlationfilter.py | nontas/trafficsignrecognition | dcf0c2657c14098842ee5f9b9a5cf72be8be7d52 | [
"BSD-3-Clause"
] | null | null | null | trafficsignrecognition/correlationfilter/correlationfilter.py | nontas/trafficsignrecognition | dcf0c2657c14098842ee5f9b9a5cf72be8be7d52 | [
"BSD-3-Clause"
] | 1 | 2017-03-25T10:07:28.000Z | 2017-03-28T08:34:41.000Z | trafficsignrecognition/correlationfilter/correlationfilter.py | nontas/trafficsignrecognition | dcf0c2657c14098842ee5f9b9a5cf72be8be7d52 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from numpy.fft import fft2, ifft2, ifftshift
from functools import partial
from scipy.sparse import spdiags, eye as speye
from scipy.sparse.linalg import spsolve
from menpofit.visualize import print_progress
from menpofit.math.fft_utils import pad, crop
def train_mosse(X, y, l=0.01, boundary='symmetric', crop_filter=True,
                prefix='', verbose=True):
    r"""
    Minimum Output Sum of Squared Errors (MOSSE) filter.

    Parameters
    ----------
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images.
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines the padding applied on the images.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MOSSE filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal to:
        ``X[0].shape + y.shape - 1``.
    prefix : `str`, optional
        The prefix of the progress bar.
    verbose : `bool`, optional
        If ``True``, then a progress bar is printed.

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        The learned Minimum Output Sum of Squared Errors (MOSSE) filter.

    References
    ----------
    .. [1] D. S. Bolme, J. R. Beveridge, B. A. Draper, and Y. M. Lui. "Visual
        Object Tracking using Adaptive Correlation Filters", IEEE Proceedings
        of International Conference on Computer Vision and Pattern Recognition
        (CVPR), 2010.
    """
    # number of images, number of channels, height and width
    # (n is unpacked for clarity but not used below)
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape (full linear-correlation size: image + response - 1)
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # auto and cross spectral energy matrices, accumulated over all images
    sXX = 0
    sXY = 0
    # for each training image and desired response
    wrap = partial(print_progress, prefix=prefix + 'Learning filter',
                   verbose=verbose, end_with_newline=False)
    for x in wrap(X):
        # extend image
        ext_x = pad(x, ext_shape, boundary=boundary)
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # update auto and cross spectral energy matrices
        sXX += fft_ext_x.conj() * fft_ext_x
        sXY += fft_ext_x.conj() * fft_ext_y

    # compute desired correlation filter: closed-form element-wise division
    # in the frequency domain, with l added for regularization/stability
    fft_ext_f = sXY / (sXX + l)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute extended filter inverse fft (ifftshift centres the filter)
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    # # Flip filter
    # f = f[:, ::-1, :]
    # f = f[:, :, ::-1]
    return f
def train_mccf(X, y, l=0.01, boundary='symmetric', crop_filter=True,
               prefix='', verbose=True):
    r"""
    Multi-Channel Correlation (MCCF) Filter.

    Parameters
    ----------
    X : ``(n_images, n_channels, image_h, image_w)`` `ndarray`
        The training images.
    y : ``(1, response_h, response_w)`` `ndarray`
        The desired response.
    l : `float`, optional
        Regularization parameter.
    boundary : ``{'constant', 'symmetric'}``, optional
        Determines the padding applied on the images.
    crop_filter : `bool`, optional
        If ``True``, the shape of the MCCF filter is the same as the shape
        of the desired response. If ``False``, the filter's shape is equal to:
        ``X[0].shape + y.shape - 1``.
    prefix : `str`, optional
        The prefix of the progress bar.
    verbose : `bool`, optional
        If ``True``, then a progress bar is printed.

    Returns
    -------
    f : ``(1, response_h, response_w)`` `ndarray`
        The learned Multi-Channel Correlation Filter (MCCF) filter.

    References
    ----------
    .. [1] H. K. Galoogahi, T. Sim, and Simon Lucey. "Multi-Channel
        Correlation Filters". IEEE Proceedings of International Conference on
        Computer Vision (ICCV), 2013.
    """
    # number of images; number of channels, height and width
    # (n is unpacked for clarity but not used below)
    n, k, hx, wx = X.shape

    # height and width of desired responses
    _, hy, wy = y.shape
    y_shape = (hy, wy)

    # extended shape (full linear-correlation size: image + response - 1)
    ext_h = hx + hy - 1
    ext_w = wx + wy - 1
    ext_shape = (ext_h, ext_w)
    # extended dimensionality
    ext_d = ext_h * ext_w

    # extend desired response
    ext_y = pad(y, ext_shape)
    # fft of extended desired response
    fft_ext_y = fft2(ext_y)

    # extend images
    ext_X = pad(X, ext_shape, boundary=boundary)

    # auto and cross spectral energy matrices, accumulated over all images
    sXX = 0
    sXY = 0
    # for each training image and desired response
    wrap = partial(print_progress, prefix=prefix + 'Learning filter',
                   verbose=verbose, end_with_newline=False)
    for ext_x in wrap(ext_X):
        # fft of extended image
        fft_ext_x = fft2(ext_x)

        # store extended image fft as sparse diagonal matrix
        diag_fft_x = spdiags(fft_ext_x.reshape((k, -1)),
                             -np.arange(0, k) * ext_d, ext_d * k, ext_d).T

        # vectorize extended desired response fft
        diag_fft_y = fft_ext_y.ravel()

        # update auto and cross spectral energy matrices
        sXX += diag_fft_x.conj().T.dot(diag_fft_x)
        sXY += diag_fft_x.conj().T.dot(diag_fft_y)

    # solve ext_d independent k x k linear systems (with regularization)
    # to obtain desired extended multi-channel correlation filter
    fft_ext_f = spsolve(sXX + l * speye(sXX.shape[-1]), sXY)
    # reshape extended filter to extended image shape
    fft_ext_f = fft_ext_f.reshape((k, ext_h, ext_w))

    # compute filter inverse fft (ifftshift centres the filter)
    f = np.real(ifftshift(ifft2(fft_ext_f), axes=(-2, -1)))
    if crop_filter:
        # crop extended filter to match desired response shape
        f = crop(f, y_shape)

    # # Flip filter
    # f = f[:, ::-1, :]
    # f = f[:, :, ::-1]
    return f
39b96e80d77fbe9945c622003c0b48f3dd15e838 | 4,161 | py | Python | b2sdk/v1/bucket.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | b2sdk/v1/bucket.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | b2sdk/v1/bucket.py | ehossack/b2-sdk-python | 034bec38671c0862b6956915993061359dbd51f6 | [
"MIT"
] | null | null | null | ######################################################################
#
# File: b2sdk/v1/bucket.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from typing import Optional
from .file_version import translate_single_file_version, FileVersionInfoFactory
from b2sdk import _v2 as v2
from b2sdk.utils import validate_b2_file_name
# Overridden to retain the obsolete copy_file and start_large_file methods
class Bucket(v2.Bucket):
    # v1-compatible factory used to build file-version objects.
    FILE_VERSION_FACTORY = staticmethod(FileVersionInfoFactory)

    def copy_file(
        self,
        file_id,
        new_file_name,
        bytes_range=None,
        metadata_directive=None,
        content_type=None,
        file_info=None,
        destination_encryption: Optional[v2.EncryptionSetting] = None,
        source_encryption: Optional[v2.EncryptionSetting] = None,
        file_retention: Optional[v2.FileRetentionSetting] = None,
        legal_hold: Optional[v2.LegalHold] = None,
    ):
        """
        Creates a new file in this bucket by (server-side) copying from an existing file.

        :param str file_id: file ID of existing file
        :param str new_file_name: file name of the new file
        :param tuple[int,int],None bytes_range: start and end offsets (**inclusive!**), default is the entire file
        :param b2sdk.v1.MetadataDirectiveMode,None metadata_directive: default is :py:attr:`b2sdk.v1.MetadataDirectiveMode.COPY`
        :param str,None content_type: content_type for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the content_type of old file
        :param dict,None file_info: file_info for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the file_info of old file
        :param b2sdk.v1.EncryptionSetting destination_encryption: encryption settings for the destination
            (``None`` if unknown)
        :param b2sdk.v1.EncryptionSetting source_encryption: encryption settings for the source
            (``None`` if unknown)
        :param b2sdk.v1.FileRetentionSetting file_retention: retention setting for the new file
        :param bool legal_hold: legalHold setting for the new file
        """
        # Delegates straight to the raw session API; the first seven
        # arguments are passed positionally, so their order must match
        # the session signature exactly.
        return self.api.session.copy_file(
            file_id,
            new_file_name,
            bytes_range,
            metadata_directive,
            content_type,
            file_info,
            self.id_,
            destination_server_side_encryption=destination_encryption,
            source_server_side_encryption=source_encryption,
            file_retention=file_retention,
            legal_hold=legal_hold,
        )

    def start_large_file(
        self,
        file_name,
        content_type=None,
        file_info=None,
        file_retention: Optional[v2.FileRetentionSetting] = None,
        legal_hold: Optional[v2.LegalHold] = None,
    ):
        """
        Start a large file transfer.

        :param str file_name: a file name
        :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name
        :param dict,None file_info: a file info to store with the file or ``None`` to not store anything
        :param b2sdk.v1.FileRetentionSetting file_retention: retention setting for the new file
        :param bool legal_hold: legalHold setting for the new file
        """
        # Name is validated client-side before touching the network.
        validate_b2_file_name(file_name)
        return self.api.services.large_file.start_large_file(
            self.id_,
            file_name,
            content_type=content_type,
            file_info=file_info,
            file_retention=file_retention,
            legal_hold=legal_hold,
        )

    # Wrap the v2 implementations so they return v1-style file versions.
    create_file = translate_single_file_version(v2.Bucket.create_file)
    create_file_stream = translate_single_file_version(v2.Bucket.create_file_stream)
    copy = translate_single_file_version(v2.Bucket.copy)
class BucketFactory(v2.BucketFactory):
    # Same factory as v2, but producing the v1-compatible Bucket above.
    BUCKET_CLASS = staticmethod(Bucket)
| 42.030303 | 197 | 0.66883 |
8e499b8640f5d2741faf8c3f7fefc259427890b3 | 9,657 | py | Python | core/domain/rule_domain.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | 3 | 2015-01-10T23:45:23.000Z | 2015-02-17T10:46:08.000Z | core/domain/rule_domain.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | null | null | null | core/domain/rule_domain.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes relating to rules."""
__author__ = 'Sean Lip'
import inspect
import os
import pkgutil
from extensions.objects.models import objects
import feconf
import jinja_utils
AND_RULE_TYPE = 'and_rule'
OR_RULE_TYPE = 'or_rule'
NOT_RULE_TYPE = 'not_rule'
DEFAULT_RULE_TYPE = 'default'
ATOMIC_RULE_TYPE = 'atomic'
# TODO(sll): In the frontend, use the rule descriptions as the single source
# of truth for the params.
def get_obj_type_for_param_name(rule_class, param_name):
    """Return the object type (normalizer class) for a rule parameter.

    Scans the parameters declared in rule_class's description and raises
    Exception if none of them matches param_name.
    """
    for name, obj_type in get_param_list(rule_class.description):
        if name == param_name:
            return obj_type
    raise Exception(
        'Rule %s has no param called %s' % (rule_class.__name__, param_name))
def get_rules_for_obj_type(obj_type):
    """Gets all rules for a given object type.

    A rule "belongs" to an object type when one of its direct base classes
    is named '<obj_type>Rule'.

    Args:
        obj_type: str. The name of the object type.
    """
    rules_path = os.path.join(os.getcwd(), feconf.RULES_DIR)
    target_base_name = '%sRule' % obj_type

    matching_classes = []
    for loader, module_name, _ in pkgutil.iter_modules(path=[rules_path]):
        # Skip the unit-test modules that live alongside the rule modules.
        if module_name.endswith('_test'):
            continue
        module = loader.find_module(module_name).load_module(module_name)
        for _, candidate in inspect.getmembers(module, inspect.isclass):
            base_names = [base.__name__ for base in candidate.__bases__]
            if target_base_name in base_names:
                matching_classes.append(candidate)
    return matching_classes
def get_param_list(description):
    """Parse '{{name|ObjType}}' placeholders out of a rule description.

    Returns a list of (param_name, normalizer_class) tuples in the order
    the placeholders appear; each class is looked up on the `objects`
    module. A description without placeholders yields an empty list.
    """
    params = []
    remaining = description
    while remaining.find('{{') != -1:
        # Drop everything up to and including the next '{{'.
        remaining = remaining[remaining.find('{{') + 2:]
        # The parameter name runs up to the '|' separator ...
        separator = remaining.find('|')
        param_name = remaining[:separator]
        remaining = remaining[separator + 1:]
        # ... and the normalizer class name up to the closing '}}'.
        closer = remaining.find('}}')
        normalizer_name = remaining[:closer]
        remaining = remaining[closer + 2:]
        params.append((param_name, getattr(objects, normalizer_name)))
    return params
class Rule(object):
    """Abstract base class for a value object that represents a rule.

    All rules assume that the subject and rule initialization parameters
    are JSONifiable objects (such as primitives, lists, dicts, and
    compositions of these, but NOT sets, tuples, etc.). This is enforced
    by normalizing the subject and rule initialization parameters to
    JSONifiable objects before any evaluations are performed.
    """
    # Class with a normalize() classmethod applied to the subject in eval();
    # must be overridden by subclasses.
    subject_type = None

    # Description of the rule, e.g. "is equal to {{x|Int}}". Should be
    # overridden by subclasses.
    description = ''
    # Flags a rule as generic or not.
    is_generic = False

    # Lazily-computed cache of the parsed parameter list (see params).
    _PARAMS = None
    # Abstract file system attached via set_fs(), if any.
    _fs = None

    @property
    def params(self):
        if self._PARAMS is None:
            # Derive the rule params from its description.
            # Note: this assignment creates an instance attribute that
            # shadows the class-level default.
            self._PARAMS = get_param_list(self.description)

        return self._PARAMS

    def __init__(self, *args):
        if len(args) != len(self.params):
            raise ValueError(
                'Expected parameters %s, received %s' % (self.params, args))

        # Normalize each argument with its declared object type and expose
        # it as an attribute named after the parameter.
        for ind, param_tuple in enumerate(self.params):
            setattr(self, param_tuple[0], param_tuple[1].normalize(args[ind]))

        self._validate_params()

    def _validate_params(self):
        """Validates the rule object immediately after initialization."""
        pass

    def _evaluate(self, subject):
        """Returns a boolean indicating the truth value of the evaluation."""
        raise NotImplementedError

    def set_fs(self, fs):
        """Set an abstract file system to use with this rule."""
        self._fs = fs
        # Returned to allow chaining, e.g. rule.set_fs(fs).eval(subject).
        return self

    @property
    def fs(self):
        return self._fs

    def eval(self, subject):
        """Public evaluation method.

        Args:
            subject: the thing to be evaluated.

        Returns:
            bool: the result of the evaluation.
        """
        # The subject is normalized with the declared subject_type before
        # being handed to the subclass's _evaluate().
        return self._evaluate(self.subject_type.normalize(subject))
def get_rule_description(definition, param_specs, answer_type):
    """Gets the description of a rule based on a rule-spec definition dict.

    param_specs is the param specifications list for the exploration.
    answer_type is a str denoting the type of the reader's answer.

    Composite definitions (and_rule/or_rule/not_rule) are handled by
    recursing into their children and joining the results.

    Here is a sample definition in YAML form which represents the rule
    'if answer < 5 and (has_seen_before == True or answer > 2) and (
        not answer == 3)'.

    rule_type: and_rule
    children:
    - rule_type: atomic
      name: LessThan
      subject: answer
      inputs:
        x: 5
    - rule_type: or_rule
      children:
      - rule_type: atomic
        name: Equals
        subject: has_seen_before
        inputs:
          x: True
      - rule_type: atomic
        name: GreaterThan
        subject: answer
        inputs:
          x: 2
    - rule_type: not_rule
      child:
      - rule_type: atomic
        name: Equals
        subject: answer
        inputs:
          x: 3
    """
    if 'rule_type' not in definition:
        raise Exception('No rule type specified when constructing rule.')
    elif definition['rule_type'] == DEFAULT_RULE_TYPE:
        return 'Default'
    elif definition['rule_type'] == ATOMIC_RULE_TYPE:
        # The subject is either the reader's answer or a named exploration
        # parameter; its type determines which rule classes are searched.
        if definition['subject'] == 'answer':
            subject_type = answer_type
        else:
            subject_type = param_specs[definition['subject']].obj_type
        all_rule_classes = get_rules_for_obj_type(subject_type)
        # Raises StopIteration if no rule class matches the given name.
        rule = next(r for r in all_rule_classes
                    if r.__name__ == definition['name'])
        return rule.description
    elif definition['rule_type'] == AND_RULE_TYPE:
        return ' and '.join([
            get_rule_description(child_dict, param_specs, answer_type)
            for child_dict in definition['children']
        ])
    elif definition['rule_type'] == OR_RULE_TYPE:
        return ' or '.join([
            get_rule_description(child_dict, param_specs, answer_type)
            for child_dict in definition['children']
        ])
    elif definition['rule_type'] == NOT_RULE_TYPE:
        # Put 'not' after the first word.
        description = get_rule_description(
            definition['child'], param_specs, answer_type)
        words = description.split()
        words.insert(1, 'not')
        return ' '.join(words)
    else:
        raise Exception('Unrecognized rule type %s' % definition['rule_type'])
def evaluate_rule(definition, param_specs, answer_type, context_params, answer,
                  fs):
    """Evaluates a rule definition using context_params. Returns a boolean.

    Args:
        definition: dict. A rule-spec definition (see get_rule_description
            for the expected structure).
        param_specs: the param specifications list for the exploration.
        answer_type: str. The object type of the reader's answer.
        context_params: dict. Current parameter values; used both as rule
            subjects and for Jinja substitution inside rule inputs.
        answer: the reader's answer.
        fs: an abstract file system, attached to each atomic rule.
    """
    if 'rule_type' not in definition:
        raise Exception('No rule type specified when constructing rule.')
    elif definition['rule_type'] == DEFAULT_RULE_TYPE:
        # The default rule always matches.
        return True
    elif definition['rule_type'] == ATOMIC_RULE_TYPE:
        subject_name = definition['subject']
        if subject_name == 'answer':
            subject_type = answer_type
        else:
            subject_type = param_specs[subject_name].obj_type

        all_rule_classes = get_rules_for_obj_type(subject_type)
        rule = next(r for r in all_rule_classes
                    if r.__name__ == definition['name'])

        # Build the constructor arguments: each declared param is taken from
        # the definition's inputs, Jinja-substituted if it contains '{{',
        # then normalized with its declared object type.
        param_list = []
        param_defns = get_param_list(rule.description)
        for (param_name, obj_cls) in param_defns:
            parsed_param = definition['inputs'][param_name]
            if (isinstance(parsed_param, basestring) and '{{' in parsed_param):
                parsed_param = jinja_utils.parse_string(
                    parsed_param, context_params, autoescape=False)
            normalized_param = obj_cls.normalize(parsed_param)
            param_list.append(normalized_param)

        if subject_name == 'answer':
            subject = answer
        else:
            subject = context_params[subject_name]

        constructed_rule = rule(*param_list)
        constructed_rule.set_fs(fs)
        return constructed_rule.eval(subject)
    elif definition['rule_type'] == AND_RULE_TYPE:
        # Bug fix: the recursive calls previously omitted the required `fs`
        # argument, raising a TypeError for every composite rule. all()/any()
        # preserve the original short-circuit behavior.
        return all(
            evaluate_rule(
                child_dict, param_specs, answer_type, context_params, answer,
                fs)
            for child_dict in definition['children'])
    elif definition['rule_type'] == OR_RULE_TYPE:
        return any(
            evaluate_rule(
                child_dict, param_specs, answer_type, context_params, answer,
                fs)
            for child_dict in definition['children'])
    elif definition['rule_type'] == NOT_RULE_TYPE:
        return not evaluate_rule(
            definition['child'], param_specs, answer_type, context_params,
            answer, fs)
    else:
        raise Exception('Unrecognized rule type %s' % definition['rule_type'])
| 31.558824 | 79 | 0.641607 |
cb569f7167e32bed64389f83e825f21c8c730c9c | 11,517 | py | Python | Training/ptsemseg/models/td4_psp/resnet.py | Xlinford/TDNet | e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | [
"MIT"
] | null | null | null | Training/ptsemseg/models/td4_psp/resnet.py | Xlinford/TDNet | e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | [
"MIT"
] | null | null | null | Training/ptsemseg/models/td4_psp/resnet.py | Xlinford/TDNet | e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | [
"MIT"
] | null | null | null | """Dilated ResNet"""
import math
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
from torch.nn import functional as F
from .utils.model_store import get_model_file
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'BasicBlock', 'Bottleneck']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    conv = nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1,
        bias=False)
    return conv
class GlobalAvgPool2d(nn.Module):
    """Global average pooling over the input's spatial dimensions.

    Collapses an (N, C, H, W) input to (N, C) by averaging over H and W.
    """

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        # Bug fix: the original called the nn.AdaptiveAvgPool2d *module
        # constructor* with (inputs, 1), which raises a TypeError. Use the
        # functional form to pool to 1x1, then flatten to (N, C).
        return F.adaptive_avg_pool2d(inputs, 1).view(inputs.size(0), -1)
class BasicBlock(nn.Module):
    """ResNet BasicBlock

    Two 3x3 convolutions with normalization and ReLU, plus a residual
    connection. ``dilation`` applies to the first conv and
    ``previous_dilation`` to the second, supporting dilated backbones.
    ``norm_layer`` must be a callable taking the channel count (there is
    no default; passing ``None`` fails at construction). ``downsample``,
    when given, projects the input to match the residual branch's shape.
    """
    # Output channels = planes * expansion (no widening for BasicBlock).
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1,
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.norm_layer = norm_layer
        self.stride = stride

    def forward(self, x):
        residual = x

        # conv -> norm -> relu, twice (no relu after the second norm until
        # the residual has been added).
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        # In-place residual addition, then the final activation.
        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (stride/dilation) -> 1x1
    expand to ``planes * 4`` channels, with a residual shortcut."""
    # pylint: disable=unused-argument
    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, previous_dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        # 3x3 spatial conv carrying stride and dilation.
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride,
            padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        # 1x1 expansion back to 4 * planes channels.
        self.conv3 = nn.Conv2d(
            planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def _sum_each(self, x, y):
        """Element-wise sum of two equal-length lists of tensors."""
        assert(len(x) == len(y))
        return [a + b for a, b in zip(x, y)]

    def forward(self, x):
        # Shortcut path: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    # pylint: disable=unused-variable
    """Dilated / segmentation-ready ResNet backbone.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: four ints -- number of blocks in layer1..layer4.
        num_classes: width of the final fully-connected classifier.
        dilated: keep stride 1 in layer3/layer4 and dilate them (2 / 4),
            lowering the output stride for dense prediction.
        multi_grid: with ``dilated``, use the [4, 8, 16] dilation grid
            inside layer4.
        deep_base: replace the single 7x7 stem conv by three 3x3 convs
            (stem output width 128 instead of 64).
        for_seg: without ``dilated``, still dilate layer3/layer4 (2 / 4)
            while keeping their stride-2 downsampling.
        norm_layer: normalisation layer class applied after each conv.
    """
    def __init__(self, block, layers, num_classes=1000, dilated=False, multi_grid=False,
                 deep_base=True, for_seg=False, norm_layer=nn.BatchNorm2d):
        # self.inplanes tracks the channel count consumed by the next
        # _make_layer call; it starts at the stem's output width.
        self.inplanes = 128 if deep_base else 64
        super(ResNet, self).__init__()
        self.norm_layer = norm_layer
        if deep_base:
            # Three stacked 3x3 convs (stride 2 on the first) instead of 7x7.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            # Classic ResNet 7x7 stem.
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # The four residual stages. Note: each _make_layer call mutates
        # self.inplanes, so these calls must stay in this exact order.
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        if dilated:
            # Dense-prediction mode: no further downsampling, dilation instead.
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           dilation=2, norm_layer=norm_layer)
            if multi_grid:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer,
                                               multi_grid=True)
            else:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer)
        else:
            # Standard striding; for_seg additionally dilates the deep stages.
            if for_seg:
                dilation= [2,4]
            else:
                dilation= [1,1]
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           dilation=dilation[0],norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                           dilation=dilation[1],norm_layer=norm_layer)
        self.avgpool = GlobalAvgPool2d()
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialisation for convs; unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, norm_layer):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None, multi_grid=False):
        """Build one residual stage of ``blocks`` blocks.

        The first block receives the stride and (when needed) a 1x1
        projection ``downsample`` for its shortcut; the remaining blocks
        keep stride 1. Side effect: updates self.inplanes.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Shortcut projection when spatial size or channels change.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        multi_dilations = [4, 8, 16]
        if multi_grid:
            layers.append(block(self.inplanes, planes, stride, dilation=multi_dilations[0],
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 1 or dilation == 2:
            # First block of a stage always uses dilation 1 in this scheme.
            layers.append(block(self.inplanes, planes, stride, dilation=1,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, dilation=2,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if multi_grid:
                # NOTE(review): multi_dilations has 3 entries, so blocks > 3
                # would index out of range here -- confirm layer4 depth <= 3
                # whenever multi_grid is used.
                layers.append(block(self.inplanes, planes, dilation=multi_dilations[i],
                                    previous_dilation=dilation, norm_layer=norm_layer))
            else:
                layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation,
                                    norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x, aux=True):
        """Run the backbone; with aux=True return (layer3, layer4) feature
        maps (for auxiliary segmentation heads), else only layer4."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        c1 = self.layer1(x)
        c2 = self.layer2(c1)
        c3 = self.layer3(c2)
        c4 = self.layer4(c3)
        if aux:
            return c3,c4
        else:
            return c4
    def get_params(self):
        """Split parameters into (weight-decayed, non-decayed) groups:
        conv/linear weights get decay, biases and norm params do not."""
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, (self.norm_layer)):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
def resnet10(pretrained=False, **kwargs):
    """Constructs a ResNet-10 model (BasicBlock, depths 1-1-1-1).

    Args:
        pretrained (bool): accepted for API symmetry with the other
            constructors, but ignored -- no pre-trained ResNet-10
            checkpoint is available in the model zoo.

    Note: the original docstring claimed "ResNet-18" (copy/paste error);
    the block depths here build a 10-layer network.
    """
    model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
    return model
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model (BasicBlock, depths 2-2-2-2).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model (BasicBlock, depths 3-4-6-3).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
def resnet50(pretrained=False, root='~/.encoding/models', **kwargs):
    """Constructs a ResNet-50 model (Bottleneck, depths 3-4-6-3).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        root (str): directory searched by get_model_file for the checkpoint
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        checkpoint = torch.load(get_model_file('resnet50', root=root))
        net.load_state_dict(checkpoint, strict=True)
    return net
def resnet101(pretrained=False, root='~/.encoding/models', **kwargs):
    """Constructs a ResNet-101 model (Bottleneck, depths 3-4-23-3).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        root (str): directory searched by get_model_file for the checkpoint
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        checkpoint = torch.load(get_model_file('resnet101', root=root))
        net.load_state_dict(checkpoint, strict=True)
    return net
def resnet152(pretrained=False, root='~/.encoding/models', **kwargs):
    """Constructs a ResNet-152 model (Bottleneck, depths 3-8-36-3).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        root (str): directory searched by get_model_file for the checkpoint
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        checkpoint = torch.load(get_model_file('resnet152', root=root))
        net.load_state_dict(checkpoint, strict=True)
    return net
| 36.795527 | 107 | 0.56829 |
94bd5f1d40ad91ce080f836dfd4fa087c0848876 | 1,300 | py | Python | testedpy/intersectionEllipse_new.py | ljm0/RandomEllipse_CrowdingZone | 2c920abb0381cc79e5d3aa09429e37d5c5196832 | [
"BSD-2-Clause"
] | 4 | 2018-11-01T22:49:46.000Z | 2019-01-16T10:12:01.000Z | testedpy/intersectionEllipse_new.py | ljm0/RandomEllipse_CrowdingZone | 2c920abb0381cc79e5d3aa09429e37d5c5196832 | [
"BSD-2-Clause"
] | null | null | null | testedpy/intersectionEllipse_new.py | ljm0/RandomEllipse_CrowdingZone | 2c920abb0381cc79e5d3aa09429e37d5c5196832 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 18:06:20 2018
@author: MiaoLi
"""
import numpy as np
from shapely.geometry.polygon import LinearRing
import matplotlib.pyplot as plt
def ellipse_polyline_intersection(ellipses, n=100):
    """Approximate and plot the intersection points of the first two ellipses.

    Each ellipse is given as ``(x0, y0, a, b, angle_deg)``: centre, the two
    semi-axes, and a rotation in degrees. Every ellipse is discretised into
    an ``n``-point polyline; the first two polylines are intersected as
    shapely LinearRings.

    Returns:
        (intersectionX, intersectionY): lists of the x and y coordinates of
        the intersection points (empty when the rings do not touch).
    """
    # Parametric samples of the unit circle, then rotate/scale/translate.
    t = np.linspace(0, 2 * np.pi, n, endpoint=False)
    st = np.sin(t)
    ct = np.cos(t)
    polylines = []
    for x0, y0, a, b, angle in ellipses:
        rad = np.deg2rad(angle)
        sa = np.sin(rad)
        ca = np.cos(rad)
        p = np.empty((n, 2))
        p[:, 0] = x0 + a * ca * ct - b * sa * st
        p[:, 1] = y0 + a * sa * ct + b * ca * st
        polylines.append(p)
    # Only the first two ellipses are intersected.
    ellipseA = polylines[0]
    ellipseB = polylines[1]
    ea = LinearRing(ellipseA)
    eb = LinearRing(ellipseB)
    mp = ea.intersection(eb)
    # Bug fix: the original iterated `mp` directly, which crashes when the
    # intersection is a single Point (a Point is not iterable) and under
    # shapely >= 2.0, where multi-part geometries must be iterated via
    # `.geoms`. NOTE(review): tangent overlaps can yield LineString parts
    # without `.x`/`.y` -- confirm inputs always cross transversally.
    if mp.is_empty:
        points = []
    elif hasattr(mp, 'geoms'):
        points = list(mp.geoms)
    else:
        points = [mp]
    intersectionX = [p.x for p in points]
    intersectionY = [p.y for p in points]
    # Visual check: intersection points plus both ellipse outlines.
    plt.plot(intersectionX, intersectionY, "o")
    plt.plot(ellipseA[:, 0], ellipseA[:, 1])
    plt.plot(ellipseB[:, 0], ellipseB[:, 1])
    return intersectionX, intersectionY
# Demo input: two ellipses as (x0, y0, semi-axis a, semi-axis b, angle_deg).
ellipses = [(1, 1, 1.5, 1.8, 90), (2, 0.5, 5, 1.5, -180)]
# Runs at import time: computes and plots their intersection points.
intersectionX, intersectionY = ellipse_polyline_intersection(ellipses)
| 28.888889 | 70 | 0.612308 |
55e8245d55b3e80f28dd28504b0f7a46a038fb58 | 8,636 | py | Python | applications/welcome/languages/de.py | Querra/piraoke | 9fd067dbcf55dd505c7825a0745c74bdbb5d1231 | [
"BSD-3-Clause"
] | null | null | null | applications/welcome/languages/de.py | Querra/piraoke | 9fd067dbcf55dd505c7825a0745c74bdbb5d1231 | [
"BSD-3-Clause"
] | null | null | null | applications/welcome/languages/de.py | Querra/piraoke | 9fd067dbcf55dd505c7825a0745c74bdbb5d1231 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
{
'!langcode!': 'de',
'!langname!': 'Deutsch (DE)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '""Update" ist ein optionaler Ausdruck wie "Feld1 = \'newvalue". JOIN Ergebnisse können nicht aktualisiert oder gelöscht werden',
'%s %%(shop)': '%s %%(shop)',
'%s %%(shop[0])': '%s %%(shop[0])',
'%s %%{quark[0]}': '%s %%{quark[0]}',
'%s %%{row} deleted': '%s %%{row} deleted',
'%s %%{row} updated': '%s %%{row} updated',
'%s %%{shop[0]}': '%s %%{shop[0]}',
'%s %%{shop}': '%s %%{shop}',
'%s selected': '%s selected',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'?': '?',
'@markmin\x01**Hello World**': '**Hallo Welt**',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ein Fehler ist aufgetreten, bitte [[laden %s]] Sie die Seite neu',
'About': 'Über',
'Access Control': 'Zugangskontrolle',
'admin': 'admin',
'Administrative Interface': 'Administrationsoberfläche',
'Ajax Recipes': 'Ajax Rezepte',
'appadmin is disabled because insecure channel': 'Appadmin ist deaktiviert, wegen der Benutzung eines unsicheren Kanals',
'Are you sure you want to delete this object?': 'Sind Sie sich sicher, dass Sie dieses Objekt löschen wollen?',
'Available Databases and Tables': 'Verfügbare Datenbanken und Tabellen',
'Buy this book': 'Dieses Buch kaufen',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache geleert',
'Cache Keys': 'Cache Schlüssel',
'Cannot be empty': 'Darf nicht leer sein',
'Check to delete': 'Auswählen um zu löschen',
'Clear CACHE?': 'CACHE löschen?',
'Clear DISK': 'DISK löschen',
'Clear RAM': 'RAM löschen',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Komponenten und Plugins',
'Config.ini': 'Config.ini',
'Confirm Password': 'Confirm Password',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Erstellt von',
'Created On': 'Erstellt am',
'Current request': 'Derzeitiger Request',
'Current response': 'Derzeitige Response',
'Current session': 'Derzeitige Session',
'customize me!': 'Pass mich an!',
'data uploaded': 'Datei hochgeladen',
'Database': 'Datenbank',
'Database %s select': 'Datenbank %s ausgewählt',
'Database Administration (appadmin)': 'Datenbankadministration (appadmin)',
'db': 'db',
'DB Model': 'Muster-DB',
'Delete:': 'Lösche:',
'Demo': 'Demo',
'Deployment Recipes': 'Entwicklungsrezepte',
'Description': 'Beschreibung',
'design': 'Design',
'Design': 'Design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk gelöscht',
'Documentation': 'Dokumentation',
"Don't know what to do?": 'Wissen Sie nicht weiter?',
'done!': 'Fertig!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit current record': 'Diesen Eintrag editieren',
'Email and SMS': 'Email und SMS',
'Enter an integer between %(min)g and %(max)g': 'Eine Zahl zwischen %(min)g und %(max)g eingeben',
'enter an integer between %(min)g and %(max)g': 'eine Zahl zwischen %(min)g und %(max)g eingeben',
'enter date and time as %(format)s': 'ein Datum und eine Uhrzeit als %(format)s eingeben',
'Errors': 'Fehlermeldungen',
'export as csv file': 'als csv Datei exportieren',
'FAQ': 'FAQ',
'First name': 'Vorname',
'Forms and Validators': 'Forms und Validators',
'Free Applications': 'Kostenlose Anwendungen',
'Graph Model': 'Muster-Graph',
'Group %(group_id)s created': 'Gruppe %(group_id)s erstellt',
'Group ID': 'Gruppen ID',
'Group uniquely assigned to user %(id)s': 'Gruppe eindeutigem Benutzer %(id)s zugewiesen',
'Groups': 'Gruppen',
'Hello World': 'Hallo Welt',
'Hello World ## Kommentar': 'Hallo Welt ',
'Hello World## Kommentar': 'Hallo Welt',
'Helping web2py': 'Helping web2py',
'Home': 'Startseite',
'How did you get here?': 'Wie sind Sie hier her gelangt?',
'import': 'Importieren',
'Import/Export': 'Importieren/Exportieren',
'Internal State': 'Innerer Zustand',
'Introduction': 'Einführung',
'Invalid email': 'Ungültige Email',
'Invalid Query': 'Ungültige Query',
'invalid request': 'Ungültiger Request',
'Is Active': 'Ist aktiv',
'Key': 'Schlüssel',
'Last name': 'Nachname',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Log In': 'Log In',
'Logged in': 'Eingeloggt',
'Logged out': 'Ausgeloggt',
'Login': 'Einloggen',
'Logout': 'Ausloggen',
'Lost Password': 'Passwort vergessen',
'Lost password?': 'Passwort vergessen?',
'Manage %(action)s': '%(action)s verwalten',
'Manage Access Control': 'Zugangskontrolle verwalten',
'Manage Cache': 'Cache verwalten',
'Memberships': 'Mitgliedschaften',
'Menu Model': 'Menü-Muster',
'Modified By': 'Verändert von',
'Modified On': 'Verändert am',
'My Sites': 'Meine Seiten',
'Name': 'Name',
'New Record': 'Neuer Eintrag',
'new record inserted': 'neuer Eintrag hinzugefügt',
'next %s rows': 'nächste %s Reihen',
'No databases in this application': 'Keine Datenbank in dieser Anwendung',
'Object or table name': 'Objekt- oder Tabellenname',
'Online book': 'Online book',
'Online examples': 'Online Beispiele',
'or import from csv file': 'oder von csv Datei importieren',
'Origin': 'Ursprung',
'Other Plugins': 'Andere Plugins',
'Other Recipes': 'Andere Rezepte',
'Overview': 'Überblick',
'Password': 'Passwort',
"Password fields don't match": 'Passwortfelder sind nicht gleich',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': 'Bitte geben Sie ihr Passwort erneut ein',
'Plugins': 'Plugins',
'Powered by': 'Unterstützt von',
'Preface': 'Allgemeines',
'previous %s rows': 'vorherige %s Reihen',
'Profile': 'Profil',
'pygraphviz library not found': 'pygraphviz Bibliothek wurde nicht gefunden',
'Python': 'Python',
'Query:': 'Query:',
'Quick Examples': 'Kurze Beispiele',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Rezepte',
'Record': 'Eintrag',
'record does not exist': 'Eintrag existiert nicht',
'Record ID': 'ID des Eintrags',
'Record id': 'id des Eintrags',
'Register': 'Register',
'Registration identifier': 'Registrierungsbezeichnung',
'Registration key': 'Registierungsschlüssel',
'Registration successful': 'Registrierung erfolgreich',
'Remember me (for 30 days)': 'Eingeloggt bleiben (30 Tage lang)',
'Reset Password key': 'Passwortschlüssel zurücksetzen',
'Role': 'Rolle',
'Roles': 'Rollen',
'Rows in Table': 'Tabellenreihen',
'Rows selected': 'Reihen ausgewählt',
'Save model as...': 'Speichere Vorlage als...',
'Semantic': 'Semantik',
'Services': 'Dienste',
'Sign Up': 'Sign Up',
'Size of cache:': 'Cachegröße:',
'state': 'Status',
'Statistics': 'Statistik',
'Stylesheet': 'Stylesheet',
'submit': 'Submit',
'Support': 'Support',
'Table': 'Tabelle',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung wie "db.tabelle1.feld1==\'wert\'". So etwas wie "db.tabelle1.feld1==db.tabelle2.feld2" resultiert in einem SQL JOIN.',
'The Core': 'Der Core',
'The output of the file is a dictionary that was rendered by the view %s': 'Die Ausgabe der Datei ist ein "dictionary", welches vom "view" %s gerendert wurde',
'The Views': 'Die Views',
'This App': 'Diese App',
'This email already has an account': 'This email already has an account',
'Time in Cache (h:m:s)': 'Zeit im Cache (h:m:s)',
'Timestamp': 'Zeitstempel',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'unable to parse csv file': 'csv Datei konnte nicht geparst werden',
'Update:': 'Update:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Benutze (...)&(...) für AND, (...)|(...) für OR, und ~(...) für NOT um komplexere Queries zu erstellen.',
'User': 'Benutzer',
'User %(id)s Logged-in': 'Benutzer %(id)s hat sich eingeloggt',
'User %(id)s Logged-out': 'Benutzer %(id)s hat sich ausgeloggt',
'User %(id)s Registered': 'Benutzer %(id)s hat sich registriert',
'User ID': 'Benutzer ID',
'Users': 'Benutzer',
'value already in database or empty': 'Wert ist bereits in der Datenbank oder leer',
'Verify Password': 'Passwort überprüfen',
'Videos': 'Videos',
'View': 'Ansicht',
'Welcome': 'Willkommen',
'Welcome to web2py!': 'Willkommen bei web2py!',
'Which called the function %s located in the file %s': 'Welche die Funktion %s in der Datei %s aufrief',
'Working...': 'Arbeite...',
'You are successfully running web2py': 'web2py wird erfolgreich ausgeführt',
'You can modify this application and adapt it to your needs': 'Sie können diese Anwendung verändern und Ihren Bedürfnissen anpassen',
'You visited the url %s': 'Sie haben die URL %s besucht',
}
| 41.320574 | 291 | 0.683997 |
ab5a7a9a1d670882b4a4aa397ca34b58c845b340 | 5,889 | py | Python | src/convert_to_rdf.py | MaastrichtU-IDS/neuro_dkg | e61bf2354ce80b1036e0b1631bb4210593177aad | [
"MIT"
] | 1 | 2020-11-27T19:22:34.000Z | 2020-11-27T19:22:34.000Z | src/convert_to_rdf.py | MaastrichtU-IDS/neuro_dkg | e61bf2354ce80b1036e0b1631bb4210593177aad | [
"MIT"
] | null | null | null | src/convert_to_rdf.py | MaastrichtU-IDS/neuro_dkg | e61bf2354ce80b1036e0b1631bb4210593177aad | [
"MIT"
] | 1 | 2020-10-26T20:27:35.000Z | 2020-10-26T20:27:35.000Z | # -*- coding: utf-8 -*-
"""Copy_Convert_NeuroDKG.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1la848FqlwvvxMyW_zvNVfC5MkCFhS1n6
"""
# import for RDF knowledge
from rdflib import Graph, URIRef, Literal, RDF, ConjunctiveGraph
from rdflib import Namespace
from rdflib import Dataset
# import relevant libraries to be used for analysis
import pandas as pd
import os
import re
import pandas as pd
from io import BytesIO
# Spreadsheet predicate label -> full predicate URI (rdf:type plus the
# NeuroDKG vocabulary terms).
predicate_to_uri = {'is_a': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type',
                    'type': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type',
                    'drug': 'http://www.w3id.org/neurodkg:drug',
                    'disease': 'http://www.w3id.org/neurodkg:disease',
                    'targetGroup': 'http://www.w3id.org/neurodkg:targetGroup',
                    'hasTreatment': 'http://www.w3id.org/neurodkg:hasTreatment',
                    'hasSymptom': 'http://www.w3id.org/neurodkg:hasSymptom',
                    'hasComorbidity': 'http://www.w3id.org/neurodkg:hasComorbidity',
                    'hasCurrentMedication': 'http://www.w3id.org/neurodkg:hasCurrentMedication',
                    'hasMinAge': 'http://www.w3id.org/neurodkg:hasMinAge',
                    'hasAgeGroup': 'http://www.w3id.org/neurodkg:hasAgeGroup',
                    'treatmentDuration': 'http://www.w3id.org/neurodkg:treatmentDuration',
                    'drugTreatment': 'http://www.w3id.org/neurodkg:drugTreatment',
                    'responseStatus': 'http://www.w3id.org/neurodkg:responseStatus',
                    'hasTherapy': 'http://www.w3id.org/neurodkg:hasTherapy',
                    'contains': 'http://www.w3id.org/neurodkg:contains'}
# Well-known object labels -> NeuroDKG class URIs (used when a row has no
# concept ID but its object matches one of these labels).
object_to_uri = {'DrugDiseaseTargetGroup': 'http://www.w3id.org/neurodkg:DrugDiseaseTargetGroup',
                 'symptomatic_relief': 'http://www.w3id.org/neurodkg:SymptomaticRelief',
                 'indication': 'http://www.w3id.org/neurodkg:Indication',
                 'prevention': 'http://www.w3id.org/neurodkg:Prevention'}
# RDF namespaces used when minting subjects/objects below.
graphID = 'SemanticTypes_2018AB'
UMLS = Namespace("http://www.w3id.org/umls/")
NEURO_INST = Namespace("http://www.w3id.org/neurodkg/Instances/")
RDFS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
# NOTE(review): this rebinds the name RDF imported from rdflib above; the
# rdflib RDF namespace object is shadowed from here on -- confirm intended.
RDF = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
BASE = Namespace("http://www.w3id.org/")
SKOS = Namespace("http://www.w3.org/2004/02/skos/core#")
# CURIE prefix (lower-cased) -> ontology base URI used to expand concept IDs
# of the form "PREFIX:localid" from the spreadsheet.
prefix_dict = {'doid': 'http://www.w3id.org/doid/',
               'umls': 'http://bioportal.bioontology.org/ontologies/umls/',
               'dbid': 'http://www.w3id.org/drugbank:',
               'ncit': 'http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#',
               'meddra': 'http://purl.bioontology.org/ontology/MEDDRA/',
               'ochv': 'http://sbmi.uth.tmc.edu/ontology/ochv#',
               'iobc': 'http://purl.jp/bio/4/id/',
               'ddo': 'http://purl.obolibrary.org/obo/DDO.owl#',
               'snomedct': 'http://purl.bioontology.org/ontology/SNOMEDCT/',
               'ogms': 'http://purl.obolibrary.org/obo/OGMS_',
               'hp': 'http://purl.obolibrary.org/obo/HP_',
               'symp': 'http://purl.obolibrary.org/obo/SYMP_',
               'mondo': 'http://purl.obolibrary.org/obo/MONDO_',
               'mfomd': 'http://purl.obolibrary.org/obo/MFOMD_',
               'omim': 'http://purl.bioontology.org/ontology/OMIM/',
               'chebi': 'http://purl.obolibrary.org/obo/CHEBI_',
               'ontoad': 'http://purl.obolibrary.org/obo/OntoAD#',
               'efo': 'http://www.ebi.ac.uk/efo/EFO_',
               'icpc2p': 'http://purl.bioontology.org/ontology/ICPC2P/'}
if __name__ == '__main__':
    # Read the curated triples sheet and assemble it into an RDF dataset.
    df = pd.read_excel('data/neurodkg_triples.xlsx', sheet_name='Clean')
    dataset = Dataset()
    for index, row in df.iterrows():
        subj = row['Subject'].replace(' ', '')
        pred = row['Predicate']
        obj = row['Object']
        obj_id = row['Concept ID']
        if pred == 'targetGroup':
            # Target-group labels become URI local names, so strip separators.
            obj = re.sub('\ |\>|\<|\-|\_|\;|\:', '', obj)
            dataset.add((NEURO_INST[subj], URIRef(
                predicate_to_uri[pred]), NEURO_INST[obj]))
        elif str(obj_id) != 'nan':
            # A concept ID is present: expect a "PREFIX:localid" CURIE.
            print(obj_id)
            curie = obj_id.replace(' ', '').split(':')
            if len(curie) <= 1:
                # Bug fix: the original only printed here and then crashed on
                # curie[1] with IndexError; a malformed ID now falls back to
                # storing the object label as a plain literal.
                print(obj_id)
                dataset.add((NEURO_INST[subj], URIRef(
                    predicate_to_uri[pred]), Literal(obj)))
            else:
                prefix = curie[0].lower()
                local_id = curie[1]
                print(curie)
                # Expand the CURIE against the known ontology base URIs.
                # NOTE(review): an unknown prefix still raises KeyError on
                # prefix_dict -- decide whether that should also fall back.
                obj_uri = URIRef(prefix_dict[prefix] + local_id)
                dataset.add((NEURO_INST[subj], URIRef(
                    predicate_to_uri[pred]), obj_uri))
                # Keep the human-readable label alongside the resolved URI.
                dataset.add((obj_uri, RDFS['label'], Literal(obj)))
        else:
            # No concept ID: use a known class URI when the label is mapped,
            # otherwise store the raw label as a literal.
            if obj in object_to_uri:
                obj_uri = object_to_uri[obj]
                dataset.add((NEURO_INST[subj], URIRef(
                    predicate_to_uri[pred]), URIRef(obj_uri)))
            else:
                dataset.add((NEURO_INST[subj], URIRef(
                    predicate_to_uri[pred]), Literal(obj)))
        print("---------", index)
    # saving the dataset as a turtle file
    dataset.serialize('data/output/neuro_dkg.ttl', format='turtle')
| 49.075 | 168 | 0.592291 |
eb0e00041caf7a0c89409490c3a5a21c8d9a503c | 5,380 | py | Python | GUI_Adquisicion/ULTRACORTEX_conexion.py | LASER-UD/Open_BCI_and_MYO_UD | f5e9aae94370c9edd7da9a38159bce71241eb043 | [
"MIT"
] | null | null | null | GUI_Adquisicion/ULTRACORTEX_conexion.py | LASER-UD/Open_BCI_and_MYO_UD | f5e9aae94370c9edd7da9a38159bce71241eb043 | [
"MIT"
] | 5 | 2021-06-02T01:50:42.000Z | 2022-03-12T00:29:55.000Z | GUI_Adquisicion/ULTRACORTEX_conexion.py | LASER-UD/Open_BCI_and_MYO_UD | f5e9aae94370c9edd7da9a38159bce71241eb043 | [
"MIT"
] | 3 | 2020-03-20T04:33:38.000Z | 2020-09-29T01:04:40.000Z | import sys
sys.path.append('C:/Python37/Lib/site-packages')
from IPython.display import clear_output
import csv
import os
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import random
from pyOpenBCI import OpenBCICyton
import threading
import time
import numpy as np
from scipy import signal
from pyOpenBCI import OpenBCICyton
fila= 0  # index of the next CSV row to write (shared across callbacks)
pg.setConfigOption('background','w')# set white background
# Cyton counts -> microvolts: 4.5 V reference, gain 24, 23-bit signed range.
SCALE_FACTOR = (4500000)/24/(2**23-1) #From the pyOpenBCI repo
colors = 'rgbycmkr'  # one pyqtgraph pen colour code per channel
# Set up GUI Layout
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title='Python OpenBCI GUI')
# One time-series plot per EEG channel (channels 1..8).
ts_plots = [win.addPlot(row=i, col=0, colspan=2, title='Channel %d' % i, labels={'left': 'uV'}) for i in range(1,9)]
#fft_plot = win.addPlot(row=1, col=2, rowspan=4, title='FFT Plot', labels={'left': 'uV', 'bottom': 'Hz'})
#fft_plot.setLimits(xMin=1,xMax=125, yMin=0, yMax=1e7)
#waves_plot = win.addPlot(row=5, col=2, rowspan=4, title='EEG Bands', labels={'left': 'uV', 'bottom': 'EEG Band'})
#waves_plot.setLimits(xMin=0.5, xMax=5.5, yMin=0)
#waves_xax = waves_plot.getAxis('bottom')
#waves_xax.setTicks([list(zip(range(6), ('', 'Delta', 'Theta', 'Alpha', 'Beta', 'Gama')))])
# Seed row of zeros; save_data appends one scaled sample per callback.
data = [[0,0,0,0,0,0,0,0]]
# Define OpenBCI callback function
def save_data(sample):
    """OpenBCI stream callback: scale the raw channel counts to uV, append
    them to the in-memory buffer and persist one CSV row.

    NOTE(review): Guardar_Datos writes row index `fila`, which lags the row
    just appended by one -- the first write emits the all-zero seed row from
    the module-level `data` initializer, and the newest sample is only
    written on the *next* callback. Confirm this offset is intended.
    """
    global data
    global fila
    print("Data")
    print(data)
    # channels_data holds raw 24-bit ADC counts; SCALE_FACTOR converts to uV.
    data.append([i*SCALE_FACTOR for i in sample.channels_data])
    # print("Datos Puros")
    # print(data)
    Guardar_Datos(data)
    fila += 1
# Crear carpeta y archivo csv
def Crear_carpeta():
    """Create the output folder (if missing) and the next free CSV file.

    Side effects: sets the module globals `carpeta` (folder name) and `j`
    (index of the newly created "datos N.csv" file), and writes the CSV
    header row "CH1 ;...;CH8 ;".
    """
    global carpeta
    global j
    Tipo = "Parpadeos"
    # Output folder per recording type; created only when it does not exist.
    carpeta = f"Base_Datos_{Tipo}"
    if not os.path.exists(carpeta):
        os.mkdir(carpeta)
    # Probe datos 1.csv, datos 2.csv, ... until an unused index is found.
    j = 1
    while True:
        if os.path.isfile(carpeta + "/datos %d.csv"% j):
            print('El archivo existe.')
            j+=1
            continue
        with open(os.path.join(carpeta, "datos %d.csv"% j), 'w') as fp:
            for canal in range(1, 9):
                fp.write('CH%d ;'%canal)
            fp.write("\n")
            print("Archivo Creado")
        break
# Almacenamiento datos csv
def Guardar_Datos(datos):
    """Append row index `fila` of *datos* (8 channel values) to the current
    CSV file. Reads the module globals `carpeta`, `j` and `fila`, which
    Crear_carpeta / save_data maintain."""
    global fila
    row = datos[fila]
    with open(os.path.join(carpeta, "datos %d.csv"% j), 'a') as fp:
        for canal in range(8):
            fp.write(str(row[canal])+";")
        fp.write("\n")
# Define function to update the graphs
def updater():
    """Qt timer callback: refresh the eight per-channel time-series plots.

    Takes the last 500 samples of the shared `data` buffer, computes
    notch/band-pass filtered copies, and redraws the channel plots (the
    FFT and EEG-band panels below are currently disabled).
    """
    global data, colors
    t_data = np.array(data[-500:]).T #transpose data
    fs = 250 #Hz
    # Notch Filter
    def notch_filter(val, data, fs=250):
        # 3rd-order Butterworth band-stop, +/-3 Hz around `val` (mains hum).
        notch_freq_Hz = np.array([float(val)])
        for freq_Hz in np.nditer(notch_freq_Hz):
            bp_stop_Hz = freq_Hz + 3.0 * np.array([-1, 1])
            b, a = signal.butter(3, bp_stop_Hz / (fs / 2.0), 'bandstop')
            fin = data = signal.lfilter(b, a, data)
        return fin
    # Bandpass filter
    def bandpass(start, stop, data, fs = 250):
        # 5th-order Butterworth band-pass between `start` and `stop` Hz.
        bp_Hz = np.array([start, stop])
        b, a = signal.butter(5, bp_Hz / (fs / 2.0), btype='bandpass')
        return signal.lfilter(b, a, data, axis=0)
    # Applying the filters
    # NOTE(review): bandpass is applied to the *raw* t_data (not to the
    # notch-filtered nf_data), and neither filtered copy is used below --
    # the time-series plots show raw t_data. Confirm this is intended.
    nf_data = [[],[],[],[],[],[],[],[]]
    bp_nf_data = [[],[],[],[],[],[],[],[]]
    for i in range(8):
        nf_data[i] = notch_filter(60, t_data[i])
        bp_nf_data[i] = bandpass(7,13, t_data[i])
    # Plot a time series of the raw data
    for j in range(8):
        ts_plots[j].clear()
        ts_plots[j].plot(pen=colors[j]).setData(t_data[j])
    # Get an FFT of the data and plot it
    #sp = [[],[],[],[],[],[],[],[]]
    #freq = [[],[],[],[],[],[],[],[]]
    #fft_plot.clear()
   # for k in range(8):
   #     sp[k] = np.absolute(np.fft.fft(bp_nf_data[k]))
        #freq[k] = np.fft.fftfreq(bp_nf_data[k].shape[-1], 1.0/fs)
        #fft_plot.plot(pen=colors[k]).setData(freq[k], sp[k])
    # Define EEG bands
   # eeg_bands = {'Delta': (1, 4),
   #              'Theta': (4, 8),
   #              'Alpha': (8, 12),
   #              'Beta': (12, 30),
   #              'Gamma': (30, 45)}
    # Take the mean of the fft amplitude for each EEG band (Only consider first channel)
   # eeg_band_fft = dict()
   # sp_bands = np.absolute(np.fft.fft(t_data[1]))
   # freq_bands = np.fft.fftfreq(t_data[1].shape[-1], 1.0/fs)
   # for band in eeg_bands:
   #     freq_ix = np.where((freq_bands >= eeg_bands[band][0]) &
   #                        (freq_bands <= eeg_bands[band][1]))[0]
   #     eeg_band_fft[band] = np.mean(sp_bands[freq_ix])
    # Plot EEG Bands
    #bg1 = pg.BarGraphItem(x=[1,2,3,4,5], height=[eeg_band_fft[band] for band in eeg_bands], width=0.6, brush='r')
    #waves_plot.clear()
    #waves_plot.addItem(bg1)
# Define thread function
def start_board():
    """Open the Cyton board on the hard-coded serial port and stream samples
    into save_data. Blocks forever; meant to run on a daemon thread.

    NOTE(review): 'COM8' is machine-specific -- confirm or parameterize.
    """
    board = OpenBCICyton(port='COM8', daisy=False)
    board.start_stream(save_data)
# Initialize Board and graphing update
# NOTE(review): Crear_carpeta() runs at import time (module-level side
# effect), so merely importing this script creates the folder/CSV file.
Crear_carpeta()
if __name__ == '__main__':
    # Skip startup inside an interactive session without a Qt event loop.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        # Stream acquisition runs on a daemon thread so the Qt loop stays free.
        x = threading.Thread(target=start_board)
        x.daemon = True
        x.start()
        # Repaint the plots as fast as possible (0 ms timer interval).
        timer = QtCore.QTimer()
        timer.timeout.connect(updater)
        timer.start(0)
        QtGui.QApplication.instance().exec_()
| 32.606061 | 116 | 0.592379 |
66ec9b78430c361ae979ce109bc97b8a2e5377b5 | 2,744 | py | Python | fedot/core/operations/evaluation/common_preprocessing.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 358 | 2020-06-11T09:34:53.000Z | 2022-03-31T12:56:22.000Z | fedot/core/operations/evaluation/common_preprocessing.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 467 | 2020-06-11T13:49:45.000Z | 2022-03-31T14:19:48.000Z | fedot/core/operations/evaluation/common_preprocessing.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 48 | 2020-07-13T14:50:45.000Z | 2022-03-26T09:37:13.000Z | import warnings
from typing import Optional
from fedot.core.data.data import InputData
from fedot.core.operations.evaluation.evaluation_interfaces import EvaluationStrategy
from fedot.core.operations.evaluation.operation_implementations.data_operations.sklearn_transformations import \
ImputationImplementation, KernelPCAImplementation, NormalizationImplementation, OneHotEncodingImplementation, \
PCAImplementation, PolyFeaturesImplementation, ScalingImplementation
warnings.filterwarnings("ignore", category=UserWarning)
class FedotPreprocessingStrategy(EvaluationStrategy):
    """Evaluation strategy that dispatches FEDOT data-preprocessing
    operations (scaling, imputation, PCA, ...) by their string type."""

    # Registry: operation-type string -> implementation class.
    __operations_by_types = {
        'scaling': ScalingImplementation,
        'normalization': NormalizationImplementation,
        'simple_imputation': ImputationImplementation,
        'pca': PCAImplementation,
        'kernel_pca': KernelPCAImplementation,
        'poly_features': PolyFeaturesImplementation,
        'one_hot_encoding': OneHotEncodingImplementation
    }

    def __init__(self, operation_type: str, params: Optional[dict] = None):
        # Resolve the implementation class first so an unknown type fails fast.
        self.operation_impl = self._convert_to_operation(operation_type)
        super().__init__(operation_type, params)

    def fit(self, train_data: InputData):
        """Train the preprocessing operation on *train_data*.

        :param InputData train_data: data used for operation training
        :return: the fitted operation implementation
        """
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        # Empty/None params mean "use the implementation's defaults".
        init_kwargs = self.params_for_fit if self.params_for_fit else {}
        operation_implementation = self.operation_impl(**init_kwargs)
        operation_implementation.fit(train_data)
        return operation_implementation

    def predict(self, trained_operation, predict_data: InputData,
                is_fit_pipeline_stage: bool):
        """Apply a fitted preprocessing operation to *predict_data*.

        :param trained_operation: fitted operation implementation
        :param predict_data: data used for prediction
        :param is_fit_pipeline_stage: whether the pipeline is in its fit stage
        :return: prediction converted to the expected output format
        """
        raw_prediction = trained_operation.transform(predict_data,
                                                     is_fit_pipeline_stage)
        # Convert prediction to output (if it is required)
        return self._convert_to_output(raw_prediction, predict_data)

    def _convert_to_operation(self, operation_type: str):
        """Look up the implementation class for *operation_type*."""
        impl = self.__operations_by_types.get(operation_type)
        if impl is None:
            raise ValueError(f'Impossible to obtain custom preprocessing strategy for {operation_type}')
        return impl
| 42.215385 | 115 | 0.720845 |
18a1a5ef0fdef7d37d24ea55defadea4385ee924 | 1,312 | py | Python | LimpezaDeArquivosSellout.py | Weslei25/Limpeza_Arquivos | e2476ed0c3f8f03a8c8b5ceb740a09a7b80e7f0d | [
"MIT"
] | null | null | null | LimpezaDeArquivosSellout.py | Weslei25/Limpeza_Arquivos | e2476ed0c3f8f03a8c8b5ceb740a09a7b80e7f0d | [
"MIT"
] | null | null | null | LimpezaDeArquivosSellout.py | Weslei25/Limpeza_Arquivos | e2476ed0c3f8f03a8c8b5ceb740a09a7b80e7f0d | [
"MIT"
] | null | null | null | import time
import schedule
from config import *
from datetime import date
from functions.delete import delete_arquivos
from functions.controle_servico import iniciar_servico, parar_servico
from functions.lerconfs import ler_json
def job():
  # Scheduled task: on the configured day of the month, stop the configured
  # services, delete the files in the configured folders, then restart the
  # services.
  if __name__ == "__main__":
    # config.json layout (by index): [0] folder paths, [1] service list,
    # [2] schedule options -- TODO confirm against config.json.
    config = ler_json('config.json')
    caminhos = dict(config[0])
    servicos = list(config[1]['servicos'])
    logging.info("Iniciando varredura de pastas")
    try:
      # Day of month as a zero-padded string, e.g. '05'.
      data_atual = date.today()
      data_atual = data_atual.strftime('%d')
      if data_atual == config[2]['dia_de_excluir_arquivos']:
        # Stop services before deleting so no files are locked.
        parar_servico(servicos=servicos)
        for caminho_arq in caminhos.values():
          delete_arquivos(caminho=caminho_arq)
          time.sleep(0.1)
        iniciar_servico(servicos=servicos)
      else:
        # NOTE(review): 'dia_de_excluir_arquivos' is a bare name, presumably
        # star-imported via `from config import *` -- verify it is defined.
        logging.warning("Hoje não é a data para exluir os arquivos - {} O dia para exclusão é {}".format(data_atual, dia_de_excluir_arquivos))
    except Exception as error:
      logging.exception(error)
  #schedule.every().day.at("08:00").do(verificacao_diaria)
  # NOTE(review): this scheduling loop is indented inside job() itself and
  # job() is never invoked at module level in this chunk -- it was probably
  # meant to live at module scope; confirm against the original script.
  schedule.every(0.1).minutes.do(job)
  while True:
    schedule.run_pending()
    time.sleep(1)
| 32.8 | 150 | 0.621189 |
ea5da562bc7c7f5e6d197c49e522c6d84aee39ee | 18,947 | py | Python | models/resnest.py | Xiaziheng89/Spatial-Location-Constraint-Prototype-Loss-for-Open-Set-Recognition | 47ebf4b844bc2b0ba98761bd47a0bb4b7963e174 | [
"MIT"
] | 5 | 2021-11-09T09:12:19.000Z | 2021-12-11T12:34:30.000Z | models/resnest.py | Xiaziheng89/Spatial-Location-Constraint-Prototype-Loss-for-Open-Set-Recognition | 47ebf4b844bc2b0ba98761bd47a0bb4b7963e174 | [
"MIT"
] | 2 | 2021-12-10T13:12:26.000Z | 2022-01-10T10:10:23.000Z | models/resnest.py | Xiaziheng89/Adversarial-Motorial-Prototype-Framework-for-Open-Set-Recognition | 022b7e5a81360a8b448072999ecf2988b34055cc | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
__all__ = ['resnest50', 'resnest101', 'resnest200', 'resnest269']
_url_format = 'https://s3.us-west-1.wasabisys.com/resnest/torch/{}-{}.pth'
_model_sha256 = {name: checksum for checksum, name in [
('528c19ca', 'resnest50'),
('22405ba7', 'resnest101'),
('75117900', 'resnest200'),
('0cc87c48', 'resnest269'),
]}
def short_hash(name):
if name not in _model_sha256:
raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
return _model_sha256[name][:8]
resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for
name in _model_sha256.keys()}
def resnest50(pretrained=False, num_classes=6, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-50 (layers 3-4-6-3, radix 2, deep stem of width 32).

    ``root`` is kept for API compatibility and is unused here.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes,
                 radix=2, groups=1, bottleneck_width=64,
                 deep_stem=True, stem_width=32, avg_down=True,
                 avd=True, avd_first=False, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50'], progress=True, check_hash=True)
        net.load_state_dict(state_dict)
    return net
def resnest101(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-101 (layers 3-4-23-3, radix 2, deep stem of width 64).

    ``root`` is kept for API compatibility and is unused here.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3],
                 radix=2, groups=1, bottleneck_width=64,
                 deep_stem=True, stem_width=64, avg_down=True,
                 avd=True, avd_first=False, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest101'], progress=True, check_hash=True)
        net.load_state_dict(state_dict)
    return net
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-200 (layers 3-24-36-3, radix 2, deep stem of width 64).

    ``root`` is kept for API compatibility and is unused here.
    """
    net = ResNet(Bottleneck, [3, 24, 36, 3],
                 radix=2, groups=1, bottleneck_width=64,
                 deep_stem=True, stem_width=64, avg_down=True,
                 avd=True, avd_first=False, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest200'], progress=True, check_hash=True)
        net.load_state_dict(state_dict)
    return net
def resnest269(pretrained=False, root='~/.encoding/models', **kwargs):
    """Build a ResNeSt-269 (layers 3-30-48-8, radix 2, deep stem of width 64).

    ``root`` is kept for API compatibility and is unused here.
    """
    net = ResNet(Bottleneck, [3, 30, 48, 8],
                 radix=2, groups=1, bottleneck_width=64,
                 deep_stem=True, stem_width=64, avg_down=True,
                 avd=True, avd_first=False, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest269'], progress=True, check_hash=True)
        net.load_state_dict(state_dict)
    return net
class Bottleneck(nn.Module):
    """ResNet Bottleneck

    ResNeSt bottleneck block: 1x1 conv -> (split-attention) 3x3 conv ->
    1x1 conv, with an optional average-pool downsampling layer (``avd``)
    placed before or after the 3x3 convolution.
    """
    # pylint: disable=unused-argument
    # Output channel count is `planes * expansion`, as in torchvision ResNet.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        # Width of the middle (grouped) convolution.
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        # Average-pool downsampling applies only to strided blocks (or the
        # first block of a stage).
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first
        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            # The pooling layer takes over the stride; conv keeps stride 1.
            stride = 1
        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)
        if radix >= 1:
            # Split-Attention convolution (the core ResNeSt operator); it
            # applies its own BN/ReLU internally.
            self.conv2 = SplAtConv2d(
                group_width, group_width, kernel_size=3,
                stride=stride, padding=dilation,
                dilation=dilation, groups=cardinality, bias=False,
                radix=radix, rectify=rectified_conv,
                rectify_avg=rectify_avg,
                norm_layer=norm_layer,
                dropblock_prob=dropblock_prob)
        elif rectified_conv:
            # Rectified convolutions (rfconv) are not supported in this copy.
            raise ValueError('Do not exist rectified_conv!!!') # This is my own modification!
            # from rfconv import RFConv2d
            # self.conv2 = RFConv2d(
            #     group_width, group_width, kernel_size=3, stride=stride,
            #     padding=dilation, dilation=dilation,
            #     groups=cardinality, bias=False,
            #     average_mode=rectify_avg)
            # self.bn2 = norm_layer(group_width)
        else:
            self.conv2 = nn.Conv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False)
            self.bn2 = norm_layer(group_width)
        self.conv3 = nn.Conv2d(
            group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes*4)
        if last_gamma:
            from torch.nn.init import zeros_
            # Zero-init the last BN gamma so the block starts near identity.
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock1(out)
        out = self.relu(out)
        if self.avd and self.avd_first:
            out = self.avd_layer(out)
        out = self.conv2(out)
        if self.radix == 0:
            # Plain conv path only; SplAtConv2d (radix >= 1) already applied
            # BN/ReLU internally.
            out = self.bn2(out)
            if self.dropblock_prob > 0.0:
                out = self.dropblock2(out)
            out = self.relu(out)
        if self.avd and not self.avd_first:
            out = self.avd_layer(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet Variants
    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    num_classes : int, default 200
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
    """
    # pylint: disable=unused-variable
    def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64,
                 num_classes=200, dilated=False, dilation=1,
                 deep_stem=False, stem_width=64, avg_down=False,
                 rectified_conv=False, rectify_avg=False,
                 avd=False, avd_first=False,
                 final_drop=0.0, dropblock_prob=0,
                 last_gamma=False, norm_layer=nn.BatchNorm2d):
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        # ResNet-D params
        self.inplanes = stem_width*2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        # ResNeSt params
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first
        super(ResNet, self).__init__()
        self.rectified_conv = rectified_conv
        self.rectify_avg = rectify_avg
        if rectified_conv:
            # Rectified convolutions (rfconv) are not supported in this copy.
            # from rfconv import RFConv2d
            # conv_layer = RFConv2d
            raise ValueError('Do not exist rectified_conv!!!') # This is my own modification!
        else:
            conv_layer = nn.Conv2d
        conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
        if deep_stem:
            # "Deep stem": three 3x3 convs in place of the single 7x7 conv.
            self.conv1 = nn.Sequential(
                conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs),
                nn.BatchNorm2d(stem_width),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
                nn.BatchNorm2d(stem_width),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width*2, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
            )
        else:
            self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3, bias=False, **conv_kwargs)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        if dilated or dilation == 4:
            # Stride-8 backbone: dilate layer3/layer4 instead of striding.
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           dilation=2, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                           dilation=4, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        elif dilation == 2:
            # Stride-16 backbone: stride layer3, dilate layer4.
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           dilation=1, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                           dilation=2, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                           norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        self.avgpool = GlobalAvgPool2d()
        self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convolutions, unit gamma / zero beta for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None,
                    dropblock_prob=0.0, is_first=True):
        """Build one residual stage of `blocks` blocks; only the first block
        strides/downsamples, the rest keep the resolution."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                # ResNet-D style shortcut: average pool then 1x1 conv.
                if dilation == 1:
                    down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                    ceil_mode=True, count_include_pad=False))
                else:
                    down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                    ceil_mode=True, count_include_pad=False))
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=1, bias=False))
            else:
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=stride, bias=False))
            down_layers.append(norm_layer(planes * block.expansion))
            downsample = nn.Sequential(*down_layers)
        layers = []
        if dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=dilation, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        return nn.Sequential(*layers)
    def forward(self, x, return_feature=False):
        # Returns logits; when return_feature is True, also returns the
        # pre-dropout pooled feature vector as (feature, logits).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # GlobalAvgPool2d already returns a flattened (N, C) tensor.
        x = self.avgpool(x)
        # x = x.view(x.size(0), -1)
        feature = torch.flatten(x, 1)
        if self.drop:
            x = self.drop(feature)
        x = self.fc(x)
        if return_feature:
            return feature, x
        else:
            return x
class GlobalAvgPool2d(nn.Module):
    """Average an NCHW tensor over its spatial dimensions, returning (N, C)."""
    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()
    def forward(self, inputs):
        pooled = F.adaptive_avg_pool2d(inputs, 1)
        return pooled.view(inputs.size(0), -1)
class DropBlock2D(object):
    """Placeholder for the DropBlock regularizer; instantiation always fails
    because DropBlock support was not ported to this copy of the code."""
    def __init__(self, *args, **kwargs):
        raise NotImplementedError
# __all__ = ['SplAtConv2d']
class SplAtConv2d(Module):
    """Split-Attention Conv2d

    Computes a ``channels*radix``-wide grouped convolution, splits the output
    into ``radix`` groups, derives per-channel attention weights from their
    sum through a small bottleneck (fc1 -> fc2 -> rSoftMax), and returns the
    attention-weighted sum of the splits.
    """
    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, bias=True,
                 radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None,
                 dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        # Rectified conv only matters when padding is actually applied.
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        # Bottleneck width of the attention branch (at least 32 channels).
        inter_channels = max(in_channels*radix//reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            # NOTE(review): rfconv is an optional external dependency; other
            # code paths in this file raise instead of importing it -- verify
            # it is installed before using rectify=True.
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                                 groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            # A single grouped conv computes all radix splits at once.
            self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                               groups=groups*radix, bias=bias, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels*radix)
        self.relu = ReLU(inplace=True)
        # Attention bottleneck: channels -> inter_channels -> channels*radix.
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
        if dropblock_prob > 0.0:
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)
    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if self.dropblock_prob > 0.0:
            x = self.dropblock(x)
        x = self.relu(x)
        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # Split the channel axis into `radix` groups and sum them.
            # (Older torch versions require an int split size.)
            if torch.__version__ < '1.5':
                splited = torch.split(x, int(rchannel//self.radix), dim=1)
            else:
                splited = torch.split(x, rchannel//self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x
        # Global average pooling feeds the attention bottleneck.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            if torch.__version__ < '1.5':
                attens = torch.split(atten, int(rchannel//self.radix), dim=1)
            else:
                attens = torch.split(atten, rchannel//self.radix, dim=1)
            # Attention-weighted combination of the splits.
            out = sum([att*split for (att, split) in zip(attens, splited)])
        else:
            out = atten * x
        return out.contiguous()
class rSoftMax(nn.Module):
    """Radix-wise normalization of split-attention logits.

    For radix > 1 the logits are softmax-normalized across the radix splits
    (per cardinal group); for radix == 1 a plain sigmoid gate is used.
    """
    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality
    def forward(self, x):
        if self.radix <= 1:
            return torch.sigmoid(x)
        batch = x.size(0)
        grouped = x.view(batch, self.cardinality, self.radix, -1)
        weights = F.softmax(grouped.transpose(1, 2), dim=1)
        return weights.reshape(batch, -1)
| 41.73348 | 116 | 0.569853 |
dbb83a845af1db140bd2fd71f32018da889bbfa9 | 203 | py | Python | testing.py | tacticalretard0/pylinalg | 8481927eb4e5ec1ed7ca3d2462054b325accd074 | [
"WTFPL"
] | null | null | null | testing.py | tacticalretard0/pylinalg | 8481927eb4e5ec1ed7ca3d2462054b325accd074 | [
"WTFPL"
] | null | null | null | testing.py | tacticalretard0/pylinalg | 8481927eb4e5ec1ed7ca3d2462054b325accd074 | [
"WTFPL"
] | null | null | null | # test file to make sure changes are working
import pylinalg.two as pyla2
import pylinalg.thr as pyla3
# Smoke test: rotate the unit X vector 90 degrees about Z and print the
# result to eyeball that pylinalg's Vec3 * Mat4 multiplication still works.
chungus = pyla3.Vec3(1, 0, 0)
matungus = pyla3.Mat4.make_rotateZ(90)
print(chungus * matungus)
| 16.916667 | 44 | 0.753695 |
a2681aa520509cb252bf24c1fa93f718f89aa721 | 88,848 | py | Python | Modules/Scripted/ShapeAnalysisToolBox/DataImporter.py | Kitware/Shape-AnaLysis-Toolbox-SALT- | 57a1eca1cc841d73cd1b8c4e2c526ecd8bb55ddf | [
"Apache-2.0"
] | 37 | 2017-03-10T18:59:17.000Z | 2021-12-21T15:52:22.000Z | Modules/Scripted/ShapeAnalysisToolBox/DataImporter.py | Kitware/Shape-AnaLysis-Toolbox-SALT- | 57a1eca1cc841d73cd1b8c4e2c526ecd8bb55ddf | [
"Apache-2.0"
] | 176 | 2017-03-17T20:46:15.000Z | 2022-03-25T15:39:06.000Z | Modules/Scripted/ShapeAnalysisToolBox/DataImporter.py | Kitware/Shape-AnaLysis-Toolbox-SALT- | 57a1eca1cc841d73cd1b8c4e2c526ecd8bb55ddf | [
"Apache-2.0"
] | 20 | 2017-02-21T18:27:48.000Z | 2021-12-21T16:43:19.000Z | import vtk, ctk, qt, slicer
from slicer.ScriptedLoadableModule import (ScriptedLoadableModule,
ScriptedLoadableModuleLogic,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleTest)
from collections import Counter
import csv
import logging
import os
from slicer.util import VTKObservationMixin
#
# DataImporter
#
class DataImporter(ScriptedLoadableModule):
  """Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def __init__(self, parent):
    # Module metadata displayed in Slicer's module selector and help panel.
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "Data Importer"
    self.parent.categories = ["Shape Analysis Toolbox"]
    self.parent.dependencies = []
    self.parent.contributors = ["Mateo Lopez (UNC), Pablo Hernandez (Kitware Inc,), Hina Shah (Kitware Inc.)"]
    self.parent.helpText = """
    This module import label images and segmentations from files and folders and compute the topology number of each segment.
    topologyNumber = cleanData.GetNumberOfPoints() - edges.GetNumberOfLines() + cleanData.GetNumberOfPolys()
    """
    self.parent.acknowledgementText = """
      This project is funded by NIBIB R01EB021391
    """ # replace with organization, grant and thanks.
#
# DataImporterLogic
#
class DataImporterLogic(ScriptedLoadableModuleLogic):
TOPOLOGY_STRIP_TYPE = 0
TOPOLOGY_DISK_TYPE = 1
TOPOLOGY_SPHERE_TYPE = 2
TOPOLOGY_DOUBLE_TORUS_TYPE = -2
TOPOLOGY_TRIPLE_TORUS_TYPE = -4
TOPOLOGY_MULTIPLE_HOLES_TYPE = -9999
TOPOLOGY_TYPES = {
TOPOLOGY_STRIP_TYPE : 'Circle/Torus/Mobius Strip',
TOPOLOGY_DISK_TYPE : 'Disk',
TOPOLOGY_SPHERE_TYPE : 'Sphere',
TOPOLOGY_DOUBLE_TORUS_TYPE : 'Double Torus',
TOPOLOGY_TRIPLE_TORUS_TYPE : 'Triple Torus',
TOPOLOGY_MULTIPLE_HOLES_TYPE : 'Multiple Holes',
}
  def __init__(self):
    """Initialize per-cohort import state; see cleanup() for the reset path."""
    ScriptedLoadableModuleLogic.__init__(self)
    # When True, cleaned polydata is kept/saved (set via setSaveCleanData).
    self.saveCleanData = False
    # Loaded MRML nodes, keyed by file name (or 'subject file' for FreeSurfer).
    self.labelMapDict = {}
    self.modelDict = {}
    self.segmentationDict = {}
    # (0, numberOfSegments) once the first case is loaded; (-1, -1) = unset.
    self.labelRangeInCohort = (-1, -1)
    self.topologyDict = {}
    self.polyDataDict = {}
    # help variable to map continuous indices to TOPOLOGY_TYPES. Used in comboBoxes
    self.topologyTypeToIndex = {
      self.TOPOLOGY_STRIP_TYPE : 0,
      self.TOPOLOGY_DISK_TYPE : 1,
      self.TOPOLOGY_SPHERE_TYPE : 2,
      self.TOPOLOGY_DOUBLE_TORUS_TYPE : 3,
      self.TOPOLOGY_TRIPLE_TORUS_TYPE : 4,
      self.TOPOLOGY_MULTIPLE_HOLES_TYPE : 5,
    }
    # Inverse mapping: comboBox index -> topology type.
    self.indexToTopologyType = {index: topologyType for topologyType, index in self.topologyTypeToIndex.items()}
    # Expected (mode) topology per segment name across the cohort.
    self.expectedTopologiesBySegment = {}
    self.inconsistentTopologyDict = {}
    self.numberOfDifferentSegments = 0
    self.dictSegmentNamesWithIntegers = dict()
    self.TemplateName = ''
    # FreeSurfer import options (configured externally before importFiles).
    # NOTE(review): self.freesurfer_lut_dict, used by importLabelMap, is not
    # initialized here -- presumably set by the widget; verify.
    self.freesurfer_import = False
    self.freesurfer_wanted_segments = []
    # 'None' (string) disables the file-type filter in importFiles.
    self.expected_file_type = 'VolumeFile'
    # 'None' (string) disables color-table based renaming in importLabelMap.
    self.color_table_id = 'None'
  def setSaveCleanData(self, save):
    """Set whether cleaned data should be kept/saved during processing."""
    self.saveCleanData = save
#
# Reset all the data for data import
#
def cleanup(self):
logging.debug('Deleting nodes')
if self.labelMapDict is not None:
for nodeName in self.labelMapDict:
logging.debug('Deleting label map node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.labelMapDict[nodeName])
if self.modelDict is not None:
for nodeName in self.modelDict:
logging.debug('Deleting model node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.modelDict[nodeName])
if self.segmentationDict is not None:
for nodeName in self.segmentationDict:
logging.debug('Deleting segmentation node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.segmentationDict[nodeName])
self.labelMapDict = {}
self.modelDict = {}
self.segmentationDict = {}
self.labelRangeInCohort = (-1, -1)
self.topologyDict = {}
self.polyDataDict = {}
self.expectedTopologiesBySegment = {}
self.inconsistentTopologyDict = {}
self.TemplateName = ''
self.numberOfDifferentSegments = 0
self.dictSegmentNamesWithIntegers = dict()
  def __del__(self):
    # Best-effort scene cleanup when the logic object is garbage collected.
    self.cleanup()
def checkLabelRangeConsistency(self, inputNumberOfSegments):
"""
Check that the input number of segments is the same than the current number of segments in the cohort.
Return tuple [boolean, labelRange].
boolean is false if not consistent with current self.labelRangeInCohort. True otherwise.
labelRange is (0, inputNumberOfSegments)
"""
labelRange = (0, inputNumberOfSegments)
if self.labelRangeInCohort != (-1, -1) and labelRange != self.labelRangeInCohort:
logging.error('Label range {} does not match with the existing label range in cohort {}.'.format(labelRange, self.labelRangeInCohort))
return False, labelRange
return True, labelRange
def importLabelMap(self, path):
"""
Populate labelMapDict, segmentationDict, labelRangeInCohort
Fails if number of labels is different than pre-existing value for labelRangeInCohort
Returns false if errors, and no class variable is modified.
"""
directory, fileName = os.path.split(path)
labelMapNode = slicer.util.loadLabelVolume(path, returnNode=True)[1]
if labelMapNode is None:
logging.error('Failed to load ' + fileName + 'as a labelmap')
# make sure each one is a labelmap
return False
file_name = os.path.splitext(fileName)[0]
if self.freesurfer_import == True:
subject_name = os.path.split(os.path.split(directory)[0])[1]
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", subject_name+' '+file_name)
else:
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", labelMapNode.GetName())
segmentationLogic = slicer.modules.segmentations.logic()
segmentationLogic.ImportLabelmapToSegmentationNode(labelMapNode,
segmentationNode)
labelMapNode.SetDisplayVisibility(False)
segmentationNode.SetDisplayVisibility(False)
segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
# if importing from freesurfer
if self.freesurfer_import == True:
to_remove_ids = []
freesurfer_found_segments = []
for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
segmentName = segmentationNode.GetSegmentation().GetSegment(segmentId).GetName()
if segmentName not in self.freesurfer_wanted_segments:
to_remove_ids.append(segmentId)
else:
freesurfer_found_segments.append(segmentName)
label_id = segmentId.split('_')[-1]
seg_name = self.freesurfer_lut_dict[label_id]['name']
color = self.freesurfer_lut_dict[label_id]['color']
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment_name = seg_name
segment.SetName(segment_name)
# segment.SetName(seg_name)
segment.SetColor(color)
if len(freesurfer_found_segments) != len(self.freesurfer_wanted_segments):
unpresent_segments = self.freesurfer_wanted_segments[:]
for seg in freesurfer_found_segments:
del unpresent_segments[unpresent_segments.index(seg)]
unpresent_segments = map(lambda x: self.freesurfer_lut_dict[x.split('_')[-1]]['name'], unpresent_segments)
logging.warning('Unable to find all segments, {} have not been found.'.format(unpresent_segments))
logging.warning('LabelMap in path: {} has not been loaded into segmentationDict.'.format(path))
return False
for segmentId in to_remove_ids:
segmentationNode.GetSegmentation().RemoveSegment(segmentId)
elif self.color_table_id != 'None':
segment_number = segmentationNode.GetSegmentation().GetNumberOfSegments()
color_node = slicer.util.getNode(pattern=self.color_table_id)
if (segment_number > 1):
for segmentIndex in range(segment_number):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
label_id = int(segmentId.split('_')[-1])
color = [.0, .0, .0, .0]
color_node.GetColor(label_id, color)
segment_name = color_node.GetColorName(label_id)
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment.SetName(segment_name)
segment.SetColor(color[:3])
elif (segment_number==1):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(0)
color = [.0, .0, .0, .0]
color_node.GetColor(1, color)
segment_name = color_node.GetColorName(1)
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment.SetName(segment_name)
segment.SetColor(color[:3])
closedSurface = segmentationNode.CreateClosedSurfaceRepresentation()
if closedSurface is False:
logging.error('Failed to create closed surface representation for filename: {}.'.format(path))
return False
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('LabelMap in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if succesful
if self.freesurfer_import == True:
subject_name = os.path.split(os.path.split(directory)[0])[1]
file_name = os.path.splitext(fileName)[0]
name = subject_name + ' ' + file_name
self.labelMapDict[name] = labelMapNode
self.segmentationDict[name] = segmentationNode
self.labelRangeInCohort = labelRange
else:
self.labelMapDict[fileName] = labelMapNode
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
def importModel(self, path):
"""
Create segmentation from a model (with only one shape). The labelRangeInCohort would be (0,1), just one segment.
If your model is a model hierarchy (containing different shapes in the same file), use
importModelHierarchy (not implemented).
Populate segmentationDict and set labelRangeInCohort to (0,1)
"""
directory, fileName = os.path.split(path)
modelNode = slicer.util.loadModel(path, returnNode=True)[1]
if modelNode is None:
logging.error('Failed to load ' + fileName + 'as a model')
return False
modelNode.SetDisplayVisibility(False)
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", modelNode.GetName() + '_allSegments')
segmentationLogic = slicer.modules.segmentations.logic()
segmentationLogic.ImportModelToSegmentationNode(modelNode, segmentationNode)
# To allow better mixing with label maps.
# We change the name of the model (originally set to the filename in vtkSlicerSegmentationModuleLogic)
# XXX Better option would be to use terminologies, see: https://discourse.slicer.org/t/finding-corresponding-segments-in-segmentations/4055/4
file_name = os.path.splitext(fileName)[0]
segmentationNode.GetSegmentation().GetSegment(modelNode.GetName()).SetName(file_name + ' 1')
closedSurface = segmentationNode.CreateClosedSurfaceRepresentation()
segmentationNode.SetDisplayVisibility(False)
# segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
if closedSurface is False:
logging.error('Failed to create closed surface representation for filename: {}.'.format(path))
return False
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('Model in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if succesful
self.modelDict[fileName] = modelNode
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
def importSegmentation(self, path):
"""
Populate segmentationDict, labelRangeInCohort
Fails if number of labels is different than pre-existing value for labelRangeInCohort
Returns false if errors, and no class variable is modified.
"""
directory, fileName = os.path.split(path)
segmentationNode = slicer.util.loadSegmentation(path, returnNode=True)[1]
if segmentationNode is None:
logging.error('Failed to load ' + fileName + 'as a segmentation')
return False
segmentationNode.SetDisplayVisibility(False)
# segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('Segmentation in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if succesful
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
def filePathsFromCSVFile(self, csvFileName):
"""
Return filePaths from CSV.
It assumes that csvFileName contains one filepath per row.
"""
filePaths = []
with open(csvFileName, 'r') as csvfile:
has_header = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # Rewind
reader = csv.reader(csvfile)
# ignore the header
if has_header:
next(reader, None)
# assuming that each row is just a file path.
for row in reader:
if len(row) > 0:
filePaths.append(row[0])
return filePaths
# Depending on the mode fill the structures table.
# TODO: add directory parsing based on mode
# else:
# logging.error("Importing from directory is not yet supported")
def importFiles(self, filePaths):
"""
Call the appropiate import function from a heteregeneous list of file paths.
Raises TypeError if not existent file or unhandled filetype by this module.
Files with a different number of labels/segments than the first one loaded are ignored with a warning.
Return true if success, raise error otherwise.
"""
self.found_segments = []
for path in filePaths:
fileType = slicer.app.ioManager().fileType(path)
logging.debug("Path [{}] has file type [{}]".format(path, fileType))
if fileType == 'VolumeFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importLabelMap(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'SegmentationFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importSegmentation(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'ModelFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importModel(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'NoFile':
raise TypeError("Path [{}] is not existent or has an unknown file type for Slicer [{}]".format(path, fileType))
else:
raise TypeError("Path [{}] has file type [{}], but this module does not handle it".format(path, fileType))
return True
def _computeModeOfSegment(self, inputTopologyDict, inputSegmentName):
"""
Compute the mode of the segmentName among the population
Raise error if input dict is empty or not nested.
Returns the mode value, or
None if inputSegmentName is not found in the dictionary.
Example::
{
name0:
{'segmentName0': '0', 'segmentName1': '1'},
name1:
{'segmentName0': '1', 'segmentName1': '0'}
name2:
{'segmentName0': '1', 'segmentName1': '0'}
}
It would return '1' if inputSegmentName == 'segmentName0'
'0' if inputSegmentName == 'segmentName1'
"""
# Check is a nested dictionary
if not isinstance(inputTopologyDict[next(iter(inputTopologyDict))], dict):
raise ValueError('Input is not a nested dictionary', inputTopologyDict)
# Use the first key...
segmentTopologies = list()
for name in inputTopologyDict:
if inputSegmentName in inputTopologyDict[name]:
segmentTopologies.append(inputTopologyDict[name][inputSegmentName])
if not segmentTopologies:
logging.warning('There is no segments with segmentName {} in input dict {}.'.format(inputSegmentName, inputTopologyDict))
return None
# dev: in most_common elements with equal counts are ordered arbitrarily
return Counter(segmentTopologies).most_common(1)[0][0]
def initExpectedTopologyBySegmentWithModes(self, inputTopologyDictionary):
    """
    Populate self.expectedTopologiesBySegment with the per-segment mode.

    Example result::
        {'segmentName0': 2, 'segmentName1': 0}
    where the integers correspond to the TOPOLOGY_TYPES enum. A mode that is
    not a known TOPOLOGY_TYPES key is logged and mapped to
    TOPOLOGY_MULTIPLE_HOLES_TYPE.
    """
    self.expectedTopologiesBySegment = {}
    # Union of every segment name seen in any subject.
    allSegmentNames = {
        segmentName
        for segmentsDict in inputTopologyDictionary.values()
        for segmentName in segmentsDict
    }
    for segmentName in allSegmentNames:
        topologyType = self._computeModeOfSegment(inputTopologyDictionary, segmentName)
        if topologyType not in self.TOPOLOGY_TYPES:
            logging.warning("Topology: [{}] for segmentName: '{}', shows multiple holes. Use a key from {}".format(topologyType, segmentName, self.TOPOLOGY_TYPES))
            topologyType = self.TOPOLOGY_MULTIPLE_HOLES_TYPE
        self.expectedTopologiesBySegment[segmentName] = int(topologyType)
def initExpectedTopologyBySubjectTemplate(self, inputTopologyDictionary, templateName):
    """
    Populate self.expectedTopologiesBySegment from the selected template subject.

    Example result::
        {'segmentName0': 2, 'segmentName1': 0}
    where the integers correspond to the TOPOLOGY_TYPES enum. Segments missing
    from the template, or with an unknown topology value, get
    TOPOLOGY_MULTIPLE_HOLES_TYPE (with a warning for unknown values).
    """
    self.expectedTopologiesBySegment = {}
    # Union of every segment name seen in any subject.
    allSegmentNames = {
        segmentName
        for segmentsDict in inputTopologyDictionary.values()
        for segmentName in segmentsDict
    }
    for segmentName in allSegmentNames:
        topologyType = inputTopologyDictionary[templateName].get(
            segmentName, self.TOPOLOGY_MULTIPLE_HOLES_TYPE)
        if topologyType not in self.TOPOLOGY_TYPES:
            logging.warning("Topology: [{}] for segmentName: '{}', shows multiple holes. Use a key from {}".format(topologyType, segmentName, self.TOPOLOGY_TYPES))
            topologyType = self.TOPOLOGY_MULTIPLE_HOLES_TYPE
        self.expectedTopologiesBySegment[segmentName] = int(topologyType)
def setFreeSurferimport(self, bool):
    """Enable/disable FreeSurfer-specific import handling."""
    # NOTE(review): parameter shadows the builtin 'bool'; renaming it would
    # break keyword callers, so it is kept as-is.
    self.freesurfer_import = bool

def setExpectedFileType(self, filetype):
    """Restrict imports to the given Slicer file type string (e.g. 'VolumeFile')."""
    self.expected_file_type = filetype

def setColorTableId(self, id):
    """Remember the color table id to use for subsequent imports."""
    # NOTE(review): parameter shadows the builtin 'id'; kept for compatibility.
    self.color_table_id = id
#
# Function to estimate topology of segmentations, and check for consistencies.
#
def populateTopologyDictionary(self):
    """
    Compute a topology number for every segment of every imported subject.

    PRE: Requires segmentationDict populated from files with importXXX
    POST: populate topologyDict, polyDataDict
    return void

    topologyDict[nodeName][segmentName] receives the Euler characteristic
    (V - E + F) of the segment's largest connected surface component;
    polyDataDict[nodeName][segmentName] receives either the cleaned polydata
    (when self.saveCleanData is set) or the raw closed-surface polydata.

    Note that this is independent of labelRangeInCohort, the keys of the two level dictionary would be:
    [nodeName][SegmentName]
    SegmentName might not be alphanumerical, create a map self.dictSegmentNamesWithIntegers
    between strings and ints.
    """
    for nodeName in self.segmentationDict:
        # Topology table is a dictionary of dictionaries.
        self.topologyDict[nodeName] = {}
        self.polyDataDict[nodeName] = {}
        segmentationNode = self.segmentationDict[nodeName]
        for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):
            segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
            segmentName = segmentationNode.GetSegmentation().GetSegment(segmentId).GetName()
            # 0 label is assumed to be the background. XXX Pablo: assumed where?
            if segmentName == "0":
                continue
            polydata = segmentationNode.GetClosedSurfaceRepresentation(segmentId)
            if polydata is None:
                logging.warning('Ignoring segment id ' + segmentName + ' for case: ' + nodeName)
                continue
            # Create vtk objects that will be used to clean the geometries
            polydataCleaner = vtk.vtkCleanPolyData()
            connectivityFilter = vtk.vtkPolyDataConnectivityFilter()
            extractEdgeFilter = vtk.vtkExtractEdges()
            # clean up polydata
            polydataCleaner.SetInputData(polydata)
            polydataCleaner.Update()
            cleanData = polydataCleaner.GetOutput()
            # Get the largest connected component
            connectivityFilter.SetInputData(cleanData)
            connectivityFilter.SetExtractionModeToLargestRegion()
            connectivityFilter.SetScalarConnectivity(0)
            connectivityFilter.Update()
            largestComponent = connectivityFilter.GetOutput()
            # Clean the largest component to get rid of spurious points
            # (the same cleaner instance is reused with new input).
            polydataCleaner.SetInputData(largestComponent)
            polydataCleaner.Update()
            cleanData = polydataCleaner.GetOutput()
            # run extract edge filter
            extractEdgeFilter.SetInputData(cleanData)
            extractEdgeFilter.Update()
            edges = extractEdgeFilter.GetOutput()
            # Euler characteristic: vertices - edges + faces.
            topologyNumber = cleanData.GetNumberOfPoints() - edges.GetNumberOfLines() + cleanData.GetNumberOfPolys()
            self.topologyDict[nodeName][segmentName] = topologyNumber
            if self.saveCleanData:
                self.polyDataDict[nodeName][segmentName] = cleanData
            else:
                self.polyDataDict[nodeName][segmentName] = polydata
            # Drop references to intermediate meshes promptly to limit peak memory.
            del edges
            del largestComponent
            del cleanData
def populateInconsistentTopologyDict(self):
    """
    Populate self.inconsistentTopologyDict from self.topologyDict.

    PRE: Requires topologyDict to be populated.
    Uses checkTopologyConsistency to populate self.inconsistentTopologyDict.

    :return: tuple (inconsistenciesExist, inconsistentTopologyDict). When
             topologyDict is empty an error is logged and (False, {}) is
             returned so callers that unpack the result do not crash.
    """
    if not self.topologyDict:
        logging.error('Topology Dict is not populated')
        # Bug fix: previously this path returned None, which raised a
        # TypeError in callers that unpack the result into two values.
        return False, {}
    inconsistenciesExist, self.inconsistentTopologyDict = self.checkTopologyConsistency(self.topologyDict)
    return inconsistenciesExist, self.inconsistentTopologyDict
def populateDictSegmentNamesWithIntegers(self):
    """
    Populate numberOfDifferentSegments and dictSegmentNamesWithIntegers from
    the existing topologyDict, assigning consecutive integers (starting at 1)
    to each distinct segment name in encounter order.
    """
    if self.topologyDict is None:
        logging.warning("Cannot populate dictSegmentNamesWithIntegers without topologyDict")
        return
    self.numberOfDifferentSegments = 0
    for segmentsDict in self.topologyDict.values():
        for segmentName in segmentsDict:
            if segmentName in self.dictSegmentNamesWithIntegers:
                continue
            self.numberOfDifferentSegments += 1
            self.dictSegmentNamesWithIntegers[segmentName] = self.numberOfDifferentSegments
def checkTopologyConsistency(self, inputTopologyDictionary):
    """
    Compare every segment's topology against the expected one.

    :return: tuple (inconsistenciesExist,
             {nodeName: {segmentName: inconsistentTopology}}) where the boolean
             reflects the existence of any inconsistency.

    Uses self.expectedTopologiesBySegment; when it is empty it is lazily
    initialized, preferring the template subject when one is selected and
    present in the input, otherwise the per-segment mode.
    """
    if not self.expectedTopologiesBySegment:
        if self.TemplateName == '' or not self.TemplateName in inputTopologyDictionary:
            self.initExpectedTopologyBySegmentWithModes(inputTopologyDictionary)
        else:
            self.initExpectedTopologyBySubjectTemplate(inputTopologyDictionary, self.TemplateName)
    inconsistentSegments = {}
    for nameNode, segmentsDict in inputTopologyDictionary.items():
        mismatches = {
            segmentName: topologyType
            for segmentName, topologyType in segmentsDict.items()
            if topologyType != self.expectedTopologiesBySegment[segmentName]
        }
        if mismatches:
            inconsistentSegments[nameNode] = mismatches
    return (bool(inconsistentSegments), inconsistentSegments)
def getLabelRangeInCohort(self):
    """Accessor for the label range computed across the imported cohort."""
    return self.labelRangeInCohort
def getTopologyString(self, nodeName, inputSegmentName):
    """
    Return a human-readable topology label for one segment of one subject,
    or 'n/a' when the subject/segment is unknown. Topology numbers outside
    TOPOLOGY_TYPES are reported as '<number>: <multi-hole label>'.
    """
    segmentName = str(inputSegmentName)
    if nodeName not in self.topologyDict:
        return 'n/a'
    if segmentName not in self.topologyDict[nodeName]:
        return 'n/a'
    topologyNum = self.topologyDict[nodeName][segmentName]
    if topologyNum in self.TOPOLOGY_TYPES:
        return self.TOPOLOGY_TYPES[topologyNum]
    # Unrecognized topology number: show the raw value tagged as multi-hole.
    return str(topologyNum) + ': ' + self.TOPOLOGY_TYPES[self.TOPOLOGY_MULTIPLE_HOLES_TYPE]
def getConsistencyString(self, nodeName, inputSegmentName):
    """
    Return 'Inconsistent' when (nodeName, segmentName) appears in
    self.inconsistentTopologyDict, 'Consistent' otherwise.
    """
    segmentName = str(inputSegmentName)
    inconsistentSegments = self.inconsistentTopologyDict.get(nodeName, {})
    if segmentName in inconsistentSegments:
        return 'Inconsistent'
    return 'Consistent'
def getTopologyAndConsistencyString(self, nodeName, inputSegmentName):
    """Return (topologyString, consistencyString) for one segment of one subject."""
    topology = self.getTopologyString(nodeName, inputSegmentName)
    consistency = self.getConsistencyString(nodeName, inputSegmentName)
    return topology, consistency
#
# FreeSurfer tab functions
#
def initFreeSurferLUT(self, LUT_path):
    """
    Parse a FreeSurfer color lookup table file into self.freesurfer_lut_dict.

    Each data line has the form: <label_id> <name> <R> <G> <B> <A>.
    The result maps the label id string to {'name': str, 'color': [r, g, b]}
    with channels normalized to [0, 1]. Comment/blank lines (which do not
    start with a digit) and malformed short lines are skipped.

    :param LUT_path: path to a FreeSurferColorLUT-style text file.
    """
    self.freesurfer_lut_dict = dict()
    with open(LUT_path, 'r') as LUT:
        for line in LUT:
            # split() is robust to runs of spaces AND tabs, and drops the
            # trailing newline (the previous split(' ') kept it attached).
            tokens = line.split()
            # Data lines start with a digit; also require enough tokens so a
            # truncated line cannot raise IndexError below.
            if len(tokens) >= 5 and tokens[0][0].isdigit():
                color = [int(tokens[2]) / 255.0, int(tokens[3]) / 255.0, int(tokens[4]) / 255.0]
                self.freesurfer_lut_dict[tokens[0]] = {'name': tokens[1], 'color': color}
def getFreeSurferAvailableSegmentIds(self, template_path):
    """
    Return the list of label ids present in ONE subject's FreeSurfer labelmap.

    Only one subject is inspected: the FreeSurfer pipeline is assumed to
    produce a consistent set of labels across subjects; additional labels
    correspond to anomalies.

    :param template_path: path of the labelmap file to inspect.
    :return: list of label id strings, or False when the file cannot be
             loaded as a labelmap.
    """
    label_ids = []
    fileName = os.path.basename(template_path)
    labelMapNode = slicer.util.loadLabelVolume(template_path, returnNode=True)[1]
    # Bug fix: check for a failed load BEFORE touching the node; previously
    # SetDisplayVisibility was called first and raised on None.
    if labelMapNode is None:
        logging.error('Failed to load ' + fileName + ' as a labelmap')
        return False
    labelMapNode.SetDisplayVisibility(False)
    # Convert the labelmap into a temporary segmentation so Slicer enumerates
    # one segment per label value.
    segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", labelMapNode.GetName() + '_allSegments')
    segmentationLogic = slicer.modules.segmentations.logic()
    segmentationLogic.ImportLabelmapToSegmentationNode(labelMapNode, segmentationNode)
    labelMapNode.SetDisplayVisibility(False)
    segmentationNode.SetDisplayVisibility(False)
    segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
    for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):
        segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
        segmentName = segmentationNode.GetSegmentation().GetSegment(segmentId).GetName()
        # NOTE(review): assumes segment names contain '_' with the label id as
        # the second token (e.g. '<volume>_<labelId>') — confirm with importer.
        label_id = segmentName.split('_')[1]
        label_ids.append(label_id)
    # Remove the temporary nodes so the scene is left unchanged.
    slicer.mrmlScene.RemoveNode(segmentationNode)
    slicer.mrmlScene.RemoveNode(labelMapNode)
    return label_ids
#
# Shape analysis structure
#
def generateShapeAnlaysisStructure(self, save_path):
    """
    Export every segment of every imported subject into the directory layout
    expected by shape-analysis tools::

        <save_path>/<segmentName>/input/volume/<subject>.nrrd
        <save_path>/<segmentName>/input/model/<subject>.vtk
        <save_path>/<segmentName>/output/

    :param save_path: existing root directory for the generated structure.
    """
    def _ensure_dir(path):
        # Create a single directory level if missing (mirrors the original
        # guarded os.mkdir calls).
        if not os.path.isdir(path):
            os.mkdir(path)

    segmentationLogic = slicer.modules.segmentations.logic()
    for name, segmentation_node in self.segmentationDict.items():
        for segmentIndex in range(segmentation_node.GetSegmentation().GetNumberOfSegments()):
            segmentId = segmentation_node.GetSegmentation().GetNthSegmentID(segmentIndex)
            segmentName = segmentation_node.GetSegmentation().GetSegment(segmentId).GetName()
            # Build the per-segment directory tree.
            directory_path = os.path.join(save_path, segmentName)
            _ensure_dir(directory_path)
            input_directory_path = os.path.join(directory_path, 'input')
            _ensure_dir(input_directory_path)
            volume_directory_path = os.path.join(input_directory_path, 'volume')
            _ensure_dir(volume_directory_path)
            model_directory_path = os.path.join(input_directory_path, 'model')
            _ensure_dir(model_directory_path)
            output_directory_path = os.path.join(directory_path, 'output')
            _ensure_dir(output_directory_path)
            labelMap_filename = segmentation_node.GetName().replace(" ", "_") + '.nrrd'
            labelMap_filepath = os.path.join(volume_directory_path, labelMap_filename)
            polydata_filename = segmentation_node.GetName().replace(" ", "_") + '.vtk'
            polydata_filepath = os.path.join(model_directory_path, polydata_filename)
            segmentIdList = vtk.vtkStringArray()
            segmentIdList.InsertNextValue(segmentId)
            full_segmentName = segmentation_node.GetName() + segmentName
            # Export and save the label map for this single segment.
            exported_labelmap = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode", full_segmentName + ' LabelMap')
            if name in self.labelMapDict.keys():
                # Use the subject's original labelmap as reference when available.
                segmentationLogic.ExportSegmentsToLabelmapNode(segmentation_node, segmentIdList, exported_labelmap, self.labelMapDict[name])
            else:
                segmentationLogic.ExportSegmentsToLabelmapNode(segmentation_node, segmentIdList, exported_labelmap)
            slicer.util.saveNode(exported_labelmap, labelMap_filepath)
            slicer.mrmlScene.RemoveNode(slicer.util.getNode(pattern=full_segmentName + ' LabelMap_ColorTable'))
            slicer.mrmlScene.RemoveNode(exported_labelmap)
            # Export and save the polydata (model) for this single segment.
            exported_hierarchy = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLModelHierarchyNode", full_segmentName + ' Model')
            segmentationLogic.ExportSegmentsToModelHierarchy(segmentation_node, segmentIdList, exported_hierarchy)
            collec = vtk.vtkCollection()
            exported_hierarchy.GetChildrenModelNodes(collec)
            exported_model = collec.GetItemAsObject(0)
            slicer.util.saveNode(exported_model, polydata_filepath)
            slicer.mrmlScene.RemoveNode(exported_hierarchy)
            slicer.mrmlScene.RemoveNode(exported_model)
            # Fix: removed a duplicated re-creation of output_directory_path
            # that previously sat at the end of the loop body.
#
# DataImporterWidget
#
class DataImporterWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
    """Initialize both base classes; the actual GUI is built in setup()."""
    ScriptedLoadableModuleWidget.__init__(self, parent)
    VTKObservationMixin.__init__(self)
def resetGlobalVariables(self):
    """Discard all imported data by replacing the logic and file-path list."""
    # Release nodes held by the current logic before replacing it.
    self.logic.cleanup()
    self.logic = DataImporterLogic()
    self.filteredFilePathsList = list()
def setup(self):
    """Build the module GUI: load the .ui file, wire signals, initialize state."""
    ScriptedLoadableModuleWidget.setup(self)
    #
    # Global variables
    #
    self.logic = DataImporterLogic()
    self.filteredFilePathsList = list()
    # Default table items: selectable and enabled, but not editable.
    self.tableWidgetItemDefaultFlags = qt.Qt.NoItemFlags | qt.Qt.ItemIsSelectable | qt.Qt.ItemIsEnabled
    self.displayOnClick = True
    # Maps QButtonGroup button ids to subject names (template selection).
    self.TemplateButtonLookup = {}
    # Table columns
    self.subjectsColumnName = 0
    self.subjectsColumnConsistency = 1
    self.subjectsColumnTemplate = 2
    # Note that these values change on initSegmentsTable/initSegmentsMultiTable
    self.segmentsColumnSubjectName = -1
    self.segmentsColumnSegmentName = 0
    self.segmentsColumnTopologyCurrent = 1
    self.segmentsColumnTopologyExpected = 2
    # Collect the color table nodes already present in the MRML scene
    # (name -> node id), used to fill both color-table comboboxes below.
    self.color_table_dict = dict()
    scene = slicer.mrmlScene
    count = scene.GetNumberOfNodes()
    for idx in range(count):
        node = scene.GetNthNode(idx)
        node_type = node.GetClassName()
        name = node.GetName()
        id = node.GetID()
        if node_type == 'vtkMRMLColorTableNode':
            self.color_table_dict[name] = id
    # Load widget from .ui file (created by Qt Designer)
    uiWidget = slicer.util.loadUI(self.resourcePath('%s.ui' % self.moduleName))
    self.layout.addWidget(uiWidget)
    self.ui = slicer.util.childWidgetVariables(uiWidget)
    # Importer-type tab widget: move to tab 1 before connecting, then back to
    # 0 after connecting so the handler fires once for the initial tab.
    self.ImporterTypeTabWidget = self.ui.ImporterTypeTabWidget
    self.ImporterTypeTabWidget.setCurrentIndex(1)
    self.ImporterTypeTabWidget.connect('currentChanged(int)', self.onCurrentTabChanged)
    self.ImporterTypeTabWidget.setCurrentIndex(0)
    # Browse Directory Button
    self.InputFolderNameLineEdit = self.ui.InputFolderNameLineEdit
    self.FolderDirectoryButton = self.ui.FolderDirectoryButton
    self.FolderDirectoryButton.connect('directoryChanged(QString)', self.onDirectoryChanged)
    self.InputFileTypeSelection = self.ui.InputFileTypeSelection
    self.InputFileTypeSelection.connect('currentIndexChanged(QString)', self.onFileTypeSelectionChanged)
    # populate file type combobox
    self.InputFileTypeSelection.addItem('Volume File')
    self.InputFileTypeSelection.addItem('Model File')
    #self.InputFileTypeSelection.addItem('Segmentation File')
    # populate colortable combobox (folder-import tab)
    self.InputFolderColorTableSelection = self.ui.InputFolderColorTableSelection
    self.InputFolderColorTableSelection.addItem('None')
    for name in self.color_table_dict.keys():
        self.InputFolderColorTableSelection.addItem(name)
    self.InputFolderColorTableSelection.connect('currentIndexChanged(QString)', self.onColorTableSelectionChanged)
    # Browse CSV Button
    self.InputCSVFileNameLineEdit = self.ui.InputCSVFileNameLineEdit
    self.CSVBrowseFilePushButton = self.ui.CSVBrowseFilePushButton
    self.CSVBrowseFilePushButton.setIcon(qt.QApplication.style().standardIcon(qt.QStyle.SP_DirIcon))
    self.CSVBrowseFilePushButton.connect('clicked(bool)', self.onClickCSVBrowseFilePushButton)
    # populate colortable combobox (CSV-import tab)
    self.InputCSVColorTableSelection = self.ui.InputCSVColorTableSelection
    self.InputCSVColorTableSelection.addItem('None')
    for name in self.color_table_dict.keys():
        self.InputCSVColorTableSelection.addItem(name)
    self.InputCSVColorTableSelection.connect('currentIndexChanged(QString)', self.onColorTableSelectionChanged)
    self.onColorTableSelectionChanged('None')
    # FreeSurfer Tab: map of display name -> relative path of the files this
    # module knows how to import from a FreeSurfer subject directory.
    self.freesurferFilesOfInterest = dict()
    self.freesurferFilesOfInterest['aseg'] = os.path.normpath("mri/aseg.mgz")
    self.freesurferFilesOfInterest['aparc+aseg'] = os.path.normpath("mri/aparc+aseg.mgz")
    self.freesurferFilesOfInterest['aparc.a2009s+aseg'] = os.path.normpath("mri/aparc.a2009s+aseg.mgz")
    # home directory
    self.InputFreeSurferHomeFolderNameLineEdit = self.ui.InputFreeSurferHomeFolderNameLineEdit
    self.FreeSurferBrowseHomeFolderPushButton = self.ui.FreeSurferBrowseHomeFolderPushButton
    self.FreeSurferBrowseHomeFolderPushButton.connect('directoryChanged(QString)', self.onFreeSurferHomeDirectoryChanged)
    # subjects directory
    self.InputFreeSurferSubjectsFolderNameLineEdit = self.ui.InputFreeSurferSubjectsFolderNameLineEdit
    self.FreeSurferBrowseSubjectsFolderPushButton = self.ui.FreeSurferBrowseSubjectsFolderPushButton
    self.FreeSurferBrowseSubjectsFolderPushButton.connect('directoryChanged(QString)', self.onFreeSurferSubjectsDirectoryChanged)
    # File Select
    self.InputFreeSurferFileSelection = self.ui.InputFreeSurferFileSelection
    self.InputFreeSurferFileSelection.connect('currentIndexChanged(QString)', self.onFreeSurferFileSelectionChanged)
    # FreeSurfer Subjects table; the *_is_running flags guard against
    # re-entrant state-changed handlers.
    self.InputFreeSurferSubjectsTable = self.ui.InputFreeSurferSubjectsTable
    self.FreeSurferImportAllSubjectsOption = self.ui.FreeSurferImportAllSubjectsOption
    self.FreeSurferImportAllSubjectsOption.stateChanged.connect(self.onStateChangedFreeSurferImportAllSubjectsOption)
    self.onStateChangedFreeSurferImportAllSubjectsOption_is_running = False
    # FreeSurfer Segments table
    self.InputFreeSurferSegmentsTable = self.ui.InputFreeSurferSegmentsTable
    self.FreeSurferImportAllSegmentsOption = self.ui.FreeSurferImportAllSegmentsOption
    self.FreeSurferImportAllSegmentsOption.stateChanged.connect(self.onStateChangedFreeSurferImportAllSegmentsOption)
    self.onStateChangedFreeSurferImportAllSegmentsOption_is_running = False
    # Seed the default FreeSurfer home/subjects paths from the environment.
    if ('FREESURFER_HOME' in os.environ.keys()):
        self.FreeSurfer_home_path = os.environ['FREESURFER_HOME']
        self.FreeSurferBrowseHomeFolderPushButton.directory = self.FreeSurfer_home_path
    if ('SUBJECTS_DIR' in os.environ.keys()):
        self.FreeSurfer_subjects_path = os.environ['SUBJECTS_DIR']
        self.FreeSurferBrowseSubjectsFolderPushButton.directory = self.FreeSurfer_subjects_path
    # Populate the file combobox
    for file_name in self.freesurferFilesOfInterest.keys():
        self.InputFreeSurferFileSelection.addItem(file_name)
    self.ui.ImportButton.connect('clicked(bool)', self.onClickImportButton)
    self.SubjectsTableWidget = self.ui.SubjectsTableWidget
    self.SegmentsTableWidget = self.ui.SegmentsTableWidget
    self.ui.SaveCleanDataCheckBox.setChecked(True)
    self.ui.SaveCleanDataCheckBox.connect('toggled(bool)', self.onSaveCleanDataCheckBoxToggled)
    self.SubjectsTableWidget.connect('cellClicked(int, int)', self.onSubjectsTableWidgetCellClicked)
    self.SegmentsTableWidget.connect('cellClicked(int, int)', self.onSegmentsTableWidgetCellClicked)
    self.ui.DisplaySelectedPushButton.connect('clicked(bool)', self.onClickDisplaySelectedPushButton)
    self.ui.DisplayOnClickCheckBox.connect('toggled(bool)', self.onDisplayOnClickCheckBoxToggled)
    # Set self.displayOnClick according to ui file
    self.onDisplayOnClickCheckBoxToggled()
    # Initialize the beginning input type.
    self.onSaveCleanDataCheckBoxToggled()
    # Shape Analysis Structure Generation
    self.InputShapeAnalysisFolderNameLineEdit = self.ui.InputShapeAnalysisFolderNameLineEdit
    self.ShapeAnalysisFolderPushButton = self.ui.ShapeAnalysisFolderPushButton
    self.ShapeAnalysisFolderPushButton.connect('directoryChanged(QString)', self.onShapeAnalysisFolderChanged)
    self.CreateShapeAnalysisStructurePushButton = self.ui.CreateShapeAnalysisStructurePushButton
    self.CreateShapeAnalysisStructurePushButton.connect('clicked(bool)', self.onGenerateShapeAnalysisStructure)
    # Observe node additions so the colortable list can be kept up to date.
    self.registerCallbacks()
    # Clear module state when the scene starts closing.
    self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
def onSceneStartClose(self, caller, event):
    """MRML scene is about to close: drop logic state and clear every table."""
    self.logic.cleanup()
    self.resetSubjectsTable()
    self.resetSegmentsTable()
    self.resetFreeSurferSubjectsTable()
    self.resetFreeSurferSegmentsTable()
#
# Reset all the data for data import
#
def cleanup(self):
    """Module teardown: clear all tables, logic state, and scene callbacks."""
    logging.debug('Cleaning up widget')
    self.resetFreeSurferSubjectsTable()
    self.resetFreeSurferSegmentsTable()
    self.resetSubjectsTable()
    self.resetSegmentsTable()
    self.resetGlobalVariables()
    self.unregisterCallbacks()
@property
def inputShapeAnalysisPath(self):
    # Text of the shape-analysis output folder line edit.
    return self.InputShapeAnalysisFolderNameLineEdit.text

@inputShapeAnalysisPath.setter
def inputShapeAnalysisPath(self, value):
    # Setting the property updates the line edit directly.
    self.InputShapeAnalysisFolderNameLineEdit.text = value
@property
def inputPath(self):
    # Text of the input-folder line edit (folder-import tab).
    return self.InputFolderNameLineEdit.text

@inputPath.setter
def inputPath(self, value):
    # Setting the property updates the line edit directly.
    self.InputFolderNameLineEdit.text = value
def initSubjectsTable(self):
    """
    Set options and headers of SubjectsTable.
    Does not require any other data structure populated.

    Columns match self.subjectsColumnName / self.subjectsColumnConsistency /
    self.subjectsColumnTemplate set in setup(): 0 = subject name,
    1 = consistency, 2 = "use as template" radio button.
    """
    self.resetSubjectsTable()
    # Fix: removed unused locals nameColumn/consistencyColumn/checkColumn.
    self.SubjectsTableWidget.setColumnCount(3)
    self.SubjectsTableWidget.setHorizontalHeaderLabels([
        'Subject name',
        'Consistency',
        "Use as template"
    ])
    self.SubjectsTableWidget.horizontalHeader().setSectionResizeMode(qt.QHeaderView.Stretch)
    self.SubjectsTableWidget.verticalHeader().setVisible(False)
    self.SubjectsTableWidget.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
def initSegmentsTable(self):
    """
    Set options and headers of SegmentsTable for the single-subject layout.
    Does not require any other data structure populated.
    """
    self.resetSegmentsTable()
    # Single-subject layout: no subject column (-1 marks it absent).
    self.segmentsColumnSubjectName = -1
    self.segmentsColumnSegmentName = 0
    self.segmentsColumnTopologyCurrent = 1
    self.segmentsColumnTopologyExpected = 2
    headers = [
        'Segment Name',
        'Current Segment Topology',
        'Expected Cohort Topology'
    ]
    self.SegmentsTableWidget.setColumnCount(3)
    self.SegmentsTableWidget.setHorizontalHeaderLabels(headers)
    self.SegmentsTableWidget.horizontalHeader().setSectionResizeMode(qt.QHeaderView.Stretch)
    self.SegmentsTableWidget.verticalHeader().setVisible(False)
    self.SegmentsTableWidget.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
def initSegmentsMultiTable(self):
    """
    Set options and headers of SegmentsTable for the multi-subject layout.
    Does not require any other data structure populated.
    """
    self.resetSegmentsTable()
    # Multi-subject layout: a leading subject column shifts the rest by one.
    self.segmentsColumnSubjectName = 0
    self.segmentsColumnSegmentName = 1
    self.segmentsColumnTopologyCurrent = 2
    self.segmentsColumnTopologyExpected = 3
    headers = [
        'Subject',
        'Segment',
        'Current Segment Topology',
        'Expected Cohort Topology'
    ]
    self.SegmentsTableWidget.setColumnCount(4)
    self.SegmentsTableWidget.setHorizontalHeaderLabels(headers)
    self.SegmentsTableWidget.horizontalHeader().setSectionResizeMode(qt.QHeaderView.Stretch)
    self.SegmentsTableWidget.verticalHeader().setVisible(False)
    self.SegmentsTableWidget.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
def resetSubjectsTable(self):
    """Remove every row from the subjects table (headers are kept)."""
    table = self.SubjectsTableWidget
    if table is not None:
        table.setRowCount(0)
def resetSegmentsTable(self):
    """Remove every row from the segments table (headers are kept)."""
    table = self.SegmentsTableWidget
    if table is not None:
        table.setRowCount(0)
def getRowsFromSelectedIndexes(self, tableWidget):
    """
    Return a list of the unique row indices among the selected indexes of the
    input table (selecting a row yields one index per column; deduplicate).
    """
    uniqueRows = {modelIndex.row() for modelIndex in tableWidget.selectedIndexes()}
    return list(uniqueRows)
def populateSegmentsTableWithCurrentSubjectsSelection(self):
    """
    Rebuild the segments table to reflect the rows currently selected in the
    subjects table: single-subject layout for exactly one selected row,
    multi-subject layout otherwise.
    """
    uniqueRowIndexes = self.getRowsFromSelectedIndexes(self.SubjectsTableWidget)
    # Get Names from rows
    if len(uniqueRowIndexes) == 1:
        # Save current selection so it can be restored after repopulating.
        selectedSegmentIndex = self.getRowsFromSelectedIndexes(self.SegmentsTableWidget)
        self.initSegmentsTable()
        name = self.SubjectsTableWidget.item(uniqueRowIndexes[0], self.subjectsColumnName).text()
        self.populateSegmentsTable(name)
        # Restore selection
        for rowindex in selectedSegmentIndex:
            self.SegmentsTableWidget.selectRow(rowindex)
        return
    # Multiple (or zero) subjects selected: one appended section per subject.
    self.initSegmentsMultiTable()
    for row in uniqueRowIndexes:
        name = self.SubjectsTableWidget.item(row, self.subjectsColumnName).text()
        self.populateSegmentsMultiTable(name)
def updateSubjectsTableConsistencyColumn(self):
    """
    Refresh the 'Consistency' column of the subjects table from the logic's
    inconsistency dictionary, then re-populate the segments table for the
    current selection.
    """
    rowCount = self.SubjectsTableWidget.rowCount
    if not rowCount:
        # Bug fix: previously sorting was disabled before this early return
        # and never re-enabled, leaving the table stuck unsorted.
        return
    # Required to safely modify the table while sorting is on; restored below.
    self.SubjectsTableWidget.setSortingEnabled(False)
    consistencyColumn = 1
    _, inconsistentDict = self.logic.populateInconsistentTopologyDict()
    for row in range(0, rowCount):
        name = self.SubjectsTableWidget.item(row, 0).text()
        consistency = 'Consistent'
        countInconsistencies = 0
        if name in inconsistentDict:
            countInconsistencies = len(inconsistentDict[name])
        if countInconsistencies > 0:
            consistency = '# Inconsistencies: ' + str(countInconsistencies)
            consistencyBackground = qt.QBrush(qt.QColor(255, 204, 203))  # light red
        else:
            consistencyBackground = qt.QBrush(qt.QColor(255, 255, 255))  # white
        self.SubjectsTableWidget.item(row, consistencyColumn).setText(consistency)
        self.SubjectsTableWidget.item(row, consistencyColumn).setBackground(consistencyBackground)
    # XXX is this the best place to trigger re-populate?
    self.resetSegmentsTable()
    self.populateSegmentsTableWithCurrentSubjectsSelection()
    self.SubjectsTableWidget.setSortingEnabled(True)
def populateSubjectsTable(self):
    """
    PRE: Requires self.logic.topologyDict, and self.logic.inconsistentTopologyDict populated.
    POST: Populate SubjectTable with the names of the files
    """
    if not self.logic.topologyDict:
        logging.error("Trying to populateSubjectsTable with non existant topologyDict.")
        return
    self.TemplateButtonLookup = {}
    # Required to safely populate table when sorting is enabled, restored later.
    self.SubjectsTableWidget.setSortingEnabled(False)
    nameColumn = 0
    consistencyColumn = 1
    checkColumn = 2
    # Exclusive radio group: exactly one subject acts as the template.
    buttonGroup = qt.QButtonGroup(self.SubjectsTableWidget)
    buttonGroup.setExclusive(True)
    inconsistenciesExist = False
    inconsistentDict = {}
    # User can change self.logic.expectedTopologiesBySegment prior to call this function
    for name in self.logic.topologyDict:
        # Populate subject names
        rowPosition = self.SubjectsTableWidget.rowCount
        if rowPosition == 0:
            # The first subject becomes the default template; consistency is
            # computed once against it.
            self.logic.TemplateName = name
            inconsistenciesExist, inconsistentDict = self.logic.populateInconsistentTopologyDict()
        self.SubjectsTableWidget.insertRow(rowPosition)
        nameItem = qt.QTableWidgetItem(name)
        nameItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SubjectsTableWidget.setItem(rowPosition, nameColumn, nameItem)
        # Populate consistency column
        consistency = 'Consistent'
        countInconsistencies = 0
        if name in inconsistentDict:
            countInconsistencies = len(inconsistentDict[name])
        if countInconsistencies > 0:
            consistency = '# Inconsistencies: ' + str(countInconsistencies)
        consistencyItem = qt.QTableWidgetItem(consistency)
        consistencyItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SubjectsTableWidget.setItem(rowPosition, consistencyColumn, consistencyItem)
        if countInconsistencies > 0:
            consistencyItem.setBackground(qt.QBrush(qt.QColor(255, 204, 203)))  # light red
        # Populate the "use as template" radio button for this row.
        checkItem = qt.QRadioButton()
        if rowPosition == 0:
            checkItem.setChecked(True)
        buttonGroup.addButton(checkItem)
        self.TemplateButtonLookup[buttonGroup.id(checkItem)] = name
        self.SubjectsTableWidget.setCellWidget(rowPosition, checkColumn, checkItem)
    # Restore sorting
    self.SubjectsTableWidget.setSortingEnabled(True)
    # Connect the radio group after population to avoid spurious callbacks.
    buttonGroup.connect('buttonClicked(int)', self.onTemplateRadioButtons)
def onTemplateRadioButtons(self, id):
    """A 'use as template' radio button was clicked: switch the template subject."""
    selectedName = self.TemplateButtonLookup[id]
    if selectedName == self.logic.TemplateName:
        return
    if selectedName not in self.logic.topologyDict:
        return
    self.logic.TemplateName = selectedName
    self.logic.initExpectedTopologyBySubjectTemplate(self.logic.topologyDict, self.logic.TemplateName)
    self.updateSubjectsTableConsistencyColumn()
def populateSegmentsTable(self, nameKey):
    """
    Given the name acting as first key for self.logic.topologyDict,
    populates the segment table for the subject with such a name.
    PRE: topologyDict has to have a key equal to input nameKey
    POST: Populates SegmentsTable (appending) for given name.
    """
    if not self.logic.topologyDict:
        logging.error("Trying to populateSegmentsTable with non existant topologyDict.")
        return
    if not nameKey in self.logic.topologyDict:
        logging.error("Input nameKey: {} does not exist in topologyDict.".format(nameKey))
        return
    # Required to safely populate table when sorting is enabled, restored later.
    self.SegmentsTableWidget.setSortingEnabled(False)
    # Block signals while populating programatically
    self.SegmentsTableWidget.blockSignals(True)
    self.SegmentsTableWidget.hide()
    segmentNameColumn = 0
    topologyCurrentColumn = 1
    topologyExpectedColumn = 2
    # cohortConsistencyColumn = 2
    for segmentName in self.logic.topologyDict[nameKey]:
        # Populate segmentName row
        rowPosition = self.SegmentsTableWidget.rowCount
        self.SegmentsTableWidget.insertRow(rowPosition)
        segmentNameItem = qt.QTableWidgetItem(segmentName)
        segmentNameItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SegmentsTableWidget.setItem(rowPosition, segmentNameColumn, segmentNameItem)
        # Get topology and consistency of segment
        topologyCurrent, consistency = self.logic.getTopologyAndConsistencyString(nameKey, segmentName)
        # Populate topology row
        topologyCurrentItem = qt.QTableWidgetItem(topologyCurrent)
        topologyCurrentItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SegmentsTableWidget.setItem(rowPosition, topologyCurrentColumn, topologyCurrentItem)
        if consistency == 'Inconsistent':
            topologyCurrentItem.setBackground(qt.QBrush(qt.QColor(255, 204, 203)))  # light red
        # Expected topology is editable via a combo box; the lambda binds the
        # segment name so the handler knows which segment changed.
        topologyExpected = self.logic.expectedTopologiesBySegment[segmentName]
        comboBox = self._createTopologyTypesComboBox()
        comboBox.setCurrentIndex(self.logic.topologyTypeToIndex[topologyExpected])
        comboBox.connect('currentIndexChanged(int)', lambda index, name=segmentName: self.onSegmentTableWidgetComboBoxCurrentIndexChanged(index, name))
        self.SegmentsTableWidget.setCellWidget(rowPosition, topologyExpectedColumn, comboBox)
    # Restore sorting
    self.SegmentsTableWidget.setSortingEnabled(True)
    # Restore signals
    self.SegmentsTableWidget.blockSignals(False)
    self.SegmentsTableWidget.show()
def populateSegmentsMultiTable(self, nameKey):
    """
    Given the name acting as first key for self.logic.topologyDict,
    populates the segment table for the subject with such a name.
    PRE: topologyDict has to have a key equal to input nameKey
    POST: Populates SegmentsTable (appending) for given name.
    The difference between this and populateSegmentsTable is that
    this variant prepends a subject-name column to every row.
    """
    ### TODO: Merge both populateSegmentsXTable to avoid repetition.
    if not self.logic.topologyDict:
        logging.error("Trying to populateSegmentsMultiTable with non existant topologyDict.")
        return
    if not nameKey in self.logic.topologyDict:
        logging.error("Input nameKey: {} does not exist in topologyDict.".format(nameKey))
        return
    # Required to safely populate table when sorting is enabled, restored later.
    self.SegmentsTableWidget.setSortingEnabled(False)
    # Block signals while populating programatically
    self.SegmentsTableWidget.blockSignals(True)
    # Hide the widget to avoid repaints while many rows are inserted.
    self.SegmentsTableWidget.hide()
    # Column layout of the multi-subject variant of the table.
    subjectNameColumn = 0
    segmentNameColumn = 1
    topologyCurrentColumn = 2
    topologyExpectedColumn = 3
    # cohortConsistencyColumn = 2
    for segmentName in self.logic.topologyDict[nameKey]:
        # Populate segmentName row
        rowPosition = self.SegmentsTableWidget.rowCount
        self.SegmentsTableWidget.insertRow(rowPosition)
        # subjectName
        subjectNameItem = qt.QTableWidgetItem(nameKey)
        subjectNameItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SegmentsTableWidget.setItem(rowPosition, subjectNameColumn, subjectNameItem )
        # segmentName
        segmentNameItem = qt.QTableWidgetItem(segmentName)
        segmentNameItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SegmentsTableWidget.setItem(rowPosition, segmentNameColumn, segmentNameItem )
        # Get topology and consistency of segment
        topologyCurrent, consistency = self.logic.getTopologyAndConsistencyString(nameKey, segmentName)
        # Populate topology row
        topologyCurrentItem = qt.QTableWidgetItem(topologyCurrent)
        topologyCurrentItem.setFlags(self.tableWidgetItemDefaultFlags)
        self.SegmentsTableWidget.setItem(rowPosition, topologyCurrentColumn, topologyCurrentItem)
        if consistency == 'Inconsistent':
            # Highlight segments whose topology disagrees with the cohort.
            topologyCurrentItem.setBackground(qt.QBrush(qt.QColor(255, 204, 203))) # light red
        topologyExpected = self.logic.expectedTopologiesBySegment[segmentName]
        # Combo box lets the user override the expected topology per segment.
        comboBox = self._createTopologyTypesComboBox()
        comboBox.setCurrentIndex(self.logic.topologyTypeToIndex[topologyExpected])
        # Default argument binds this row's segmentName for the callback.
        comboBox.connect('currentIndexChanged(int)', lambda index, name=segmentName: self.onSegmentTableWidgetComboBoxCurrentIndexChanged(index, name))
        self.SegmentsTableWidget.setCellWidget(rowPosition, topologyExpectedColumn, comboBox)
    # Restore sorting
    self.SegmentsTableWidget.setSortingEnabled(True)
    # Restore signals
    self.SegmentsTableWidget.blockSignals(False)
    self.SegmentsTableWidget.show()
def _createTopologyTypesComboBox(self):
    """Build and return a QComboBox listing every TOPOLOGY_TYPES label."""
    box = qt.QComboBox()
    for label in self.logic.TOPOLOGY_TYPES.values():
        box.addItem(label)
    return box
def importFiles(self, filePaths):
    """
    Use logic.importFiles, populateTopologyDict and populate tables.

    filePaths -- list of absolute file paths to import.
    Aborts (with a warning) if the logic-level import reports failure.
    """
    if not self.logic.importFiles(filePaths):
        logging.warning("logic.importFiles issues, see raised errors.")
        return
    # Populate the topology table
    self.logic.populateTopologyDictionary()
    self.logic.populateInconsistentTopologyDict()
    ######### Init Tables ##########
    self.initSubjectsTable()
    self.initSegmentsTable()
    ######### Populate Tables ##########
    self.populateSubjectsTable()
    # Select the first subject so the segments table is filled right away.
    self.SubjectsTableWidget.setCurrentCell(0, 0)
    self.onSubjectsTableWidgetCellClicked(0, 0)
# FreeSurfer tab functions
def resetFreeSurferSubjectsTable(self):
    """Drop every row of the FreeSurfer subjects table, if the widget exists."""
    table = self.InputFreeSurferSubjectsTable
    if table is not None:
        table.setRowCount(0)
def initFreeSurferSubjectsTable(self):
    """Reset and (re)configure the FreeSurfer subjects table: two columns,
    'Import' (checkbox) and 'Subject', sortable, row-wise selection."""
    self.resetFreeSurferSubjectsTable()
    # Column indexes reused across the class.
    self.freesurferSubjectImport = 0
    self.freesurferSubjectName = 1
    table = self.InputFreeSurferSubjectsTable
    table.setColumnCount(2)
    table.setHorizontalHeaderLabels(['Import', 'Subject'])
    table.verticalHeader().setVisible(False)
    table.setSortingEnabled(True)
    table.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
    # Resize the columns nicely: checkbox hugs content, name stretches.
    header = table.horizontalHeader()
    header.setSectionResizeMode(self.freesurferSubjectImport, qt.QHeaderView.ResizeToContents)
    header.setSectionResizeMode(self.freesurferSubjectName, qt.QHeaderView.Stretch)
def addRowToFreeSurferSubjectsTable(self, subject_name, path):
    """
    Append one row (centered import checkbox + subject name) to the
    FreeSurfer subjects table. Toggling the checkbox (de)selects *path*
    for import via onToggleFreeSurferSubjectSelection.
    """
    check_box = qt.QCheckBox()
    check_box.setChecked(False)
    # Default argument binds this row's path for the callback.
    check_box.stateChanged.connect(lambda state, x=path: self.onToggleFreeSurferSubjectSelection(x))
    # Small container widget centers the checkbox inside the cell.
    container = qt.QWidget()
    layout = qt.QHBoxLayout(container)  # QHBoxLayout(parent) already installs itself
    layout.addWidget(check_box)
    layout.setAlignment(qt.Qt.AlignCenter)
    layout.setContentsMargins(0, 0, 0, 0)
    rowPosition = self.InputFreeSurferSubjectsTable.rowCount
    self.InputFreeSurferSubjectsTable.insertRow(rowPosition)
    self.InputFreeSurferSubjectsTable.setCellWidget(rowPosition, self.freesurferSubjectImport, container)
    self.InputFreeSurferSubjectsTable.setItem(rowPosition, self.freesurferSubjectName, qt.QTableWidgetItem(subject_name))
def resetFreeSurferSegmentsTable(self):
    """Drop every row of the FreeSurfer segments table, if the widget exists."""
    table = self.InputFreeSurferSegmentsTable
    if table is not None:
        table.setRowCount(0)
def initFreeSurferSegmentsTable(self):
    """Reset and (re)configure the FreeSurfer segments table: two columns,
    'Import' (checkbox) and 'Segment', sortable, row-wise selection."""
    self.resetFreeSurferSegmentsTable()
    self.freesurferSegmentsImport = 0
    self.freesurferSegmentsName = 1
    freesurferSegmentsImportLabel = 'Import'
    freesurferSegmentsLabel = 'Segment'
    self.InputFreeSurferSegmentsTable.setColumnCount(2)
    self.InputFreeSurferSegmentsTable.setHorizontalHeaderLabels([
        freesurferSegmentsImportLabel,
        freesurferSegmentsLabel
    ])
    self.InputFreeSurferSegmentsTable.verticalHeader().setVisible(False)
    self.InputFreeSurferSegmentsTable.setSortingEnabled(True)
    self.InputFreeSurferSegmentsTable.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
    # Resize the columns nicely.
    header = self.InputFreeSurferSegmentsTable.horizontalHeader()
    # FIX: use the *segments* column indexes. The original referenced
    # self.freesurferSubjectImport/Name (copy-paste from the subjects table),
    # which only worked if the subjects table had been initialized first.
    header.setSectionResizeMode(self.freesurferSegmentsImport, qt.QHeaderView.ResizeToContents)
    header.setSectionResizeMode(self.freesurferSegmentsName, qt.QHeaderView.Stretch)
def addRowToFreeSurferSegmentsTable(self, segment_name, label_id):
    """
    Append one row (centered import checkbox + segment name) to the
    FreeSurfer segments table. Toggling the checkbox (de)selects
    *label_id* via onToggleFreeSurferSegmentSelection.
    """
    check_box = qt.QCheckBox()
    check_box.setChecked(False)
    # Default argument binds this row's label id for the callback.
    check_box.stateChanged.connect(lambda state, x=label_id: self.onToggleFreeSurferSegmentSelection(x))
    # Small container widget centers the checkbox inside the cell.
    container = qt.QWidget()
    layout = qt.QHBoxLayout(container)  # QHBoxLayout(parent) already installs itself
    layout.addWidget(check_box)
    layout.setAlignment(qt.Qt.AlignCenter)
    layout.setContentsMargins(0, 0, 0, 0)
    rowPosition = self.InputFreeSurferSegmentsTable.rowCount
    self.InputFreeSurferSegmentsTable.insertRow(rowPosition)
    self.InputFreeSurferSegmentsTable.setCellWidget(rowPosition, self.freesurferSegmentsImport, container)
    self.InputFreeSurferSegmentsTable.setItem(rowPosition, self.freesurferSegmentsName, qt.QTableWidgetItem(segment_name))
def uncheckFreeSurferTables(self):
    """
    Uncheck every per-row 'Import' checkbox in both FreeSurfer tables
    without emitting stateChanged (signals are blocked around setChecked).
    """
    def _uncheck_import_column(table):
        # Column 0 holds a container widget; children()[1] is the checkbox
        # (children()[0] is the container's layout).
        for i_row in range(table.rowCount):
            check_box = table.cellWidget(i_row, 0).children()[1]
            check_box.blockSignals(True)
            check_box.setChecked(False)
            check_box.blockSignals(False)
    _uncheck_import_column(self.InputFreeSurferSubjectsTable)
    _uncheck_import_column(self.InputFreeSurferSegmentsTable)
def resetFreeSurferTab(self):
    """Clear the FreeSurfer segment selection and uncheck both FreeSurfer tables."""
    self.logic.freesurfer_wanted_segments = []
    self.uncheckFreeSurferTables()
def resetCSVTab(self):
    """Blank out the CSV file-name line edit."""
    self.InputCSVFileNameLineEdit.text = ''
def resetDirectoryTab(self):
    """Reset the stored input path and the directory button (signals suppressed)."""
    self.inputPath = ''
    button = self.FolderDirectoryButton
    button.blockSignals(True)
    button.directory = '/'
    button.blockSignals(False)
#
# GUI Callback functions: Shape Analysis Structure Generation Callbacks
#
def onShapeAnalysisFolderChanged(self, directoryPath):
    """Remember *directoryPath* as the shape-analysis output folder if it exists."""
    logging.debug("onShapeAnalysisFolderChanged: {}".format(directoryPath))
    directory = qt.QDir(directoryPath)
    if directory.exists():
        self.inputShapeAnalysisPath = directoryPath
    else:
        logging.error("Directory {} does not exist.".format(directory))
def onGenerateShapeAnalysisStructure(self):
    """
    Generate the shape-analysis folder structure from the imported data.
    Requires data to have been imported and an output folder to be set.
    """
    logging.debug("onGenerateShapeAnalysisStructure")
    # Check for imported data
    if len(self.logic.segmentationDict) == 0:
        logging.error("Empty segmentation dictionary, import data before generating the Shape Analysis Structure.")
        return
    if self.inputShapeAnalysisPath == '':
        logging.error("No Shape Analysis folder specified")
        return
    # NOTE: the "Anlaysis" typo matches the logic API method name; do not "fix" here.
    self.logic.generateShapeAnlaysisStructure(self.inputShapeAnalysisPath)
    # Consistency: the rest of the module reports through logging, not print().
    logging.info('the shape analysis folder located at %s is ready', self.inputShapeAnalysisPath)
def onCurrentTabChanged(self, index):
    """
    Resize tabs so the active one gets minimal space, then reset the
    import state and the options of the tabs being left.
    """
    # Shrink every non-current tab so the tab widget hugs the active page.
    for i in range(self.ImporterTypeTabWidget.count):
        if i != index:
            self.ImporterTypeTabWidget.widget(i).setSizePolicy(qt.QSizePolicy.Ignored, qt.QSizePolicy.Ignored)
    currentWidget = self.ImporterTypeTabWidget.widget(index)
    currentWidget.setSizePolicy(qt.QSizePolicy.Preferred, qt.QSizePolicy.Preferred)
    currentWidget.resize(currentWidget.minimumSizeHint)
    currentWidget.adjustSize()
    # Switching tabs invalidates any previously assembled import list.
    self.filteredFilePathsList = []
    # Reset the import options of the other tabs. The resets are wrapped in
    # try/except because sibling widgets may not exist yet during setup.
    # (Narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate.)
    tab_text = self.ImporterTypeTabWidget.tabText(index)
    if tab_text == 'Import from FreeSurfer':
        self.logic.setFreeSurferimport(True)
        self.logic.setExpectedFileType('VolumeFile')
        try:
            self.resetDirectoryTab()
            self.resetCSVTab()
        except Exception:
            pass
    elif tab_text == 'Import from CSV':
        self.logic.setFreeSurferimport(False)
        self.logic.setExpectedFileType('None')
        try:
            self.resetFreeSurferTab()
            self.resetDirectoryTab()
        except Exception:
            pass
    elif tab_text == 'Import from directory':
        self.logic.setFreeSurferimport(False)
        try:
            # Bounce the file-type combo so its change signal re-fires.
            self.InputFileTypeSelection.setCurrentIndex(1)
            self.InputFileTypeSelection.setCurrentIndex(0)
            self.resetFreeSurferTab()
            self.resetCSVTab()
        except Exception:
            pass
    else:
        try:
            self.logic.setFreeSurferimport(False)
            self.resetFreeSurferTab()
            self.resetDirectoryTab()
            self.resetCSVTab()
        except Exception:
            pass
#
# GUI Callback functions: FreeSurfer UI Callbacks
#
def onFreeSurferHomeDirectoryChanged(self, freesurfer_home_path):
    """
    Validate *freesurfer_home_path* as a FreeSurfer home (must contain
    FreeSurferColorLUT.txt), store it, and initialize the label LUT.
    """
    logging.debug("onFreeSurferHomeDirectoryChanged: {}".format(freesurfer_home_path))
    LUT_path = os.path.join(freesurfer_home_path, 'FreeSurferColorLUT.txt')
    if not os.path.isfile(LUT_path):
        # FIX: the original formatted an undefined name `directory` here,
        # raising NameError whenever the path was invalid.
        logging.error("Directory {} is not a valid FreeSurfer directory, impossible to find FreeSurferColorLUT.txt.".format(freesurfer_home_path))
        return
    # Set directory variable and lineEdit
    self.freesurfer_home_path = freesurfer_home_path
    self.InputFreeSurferHomeFolderNameLineEdit.text = freesurfer_home_path
    # Init the correspondence labels -> segment names dict.
    self.logic.initFreeSurferLUT(LUT_path)
def onFreeSurferSubjectsDirectoryChanged(self, freesurfer_subjects_path):
    """
    Store the FreeSurfer subjects directory and refresh the subject and
    segment tables for the currently selected file of interest (if any).
    """
    logging.debug("onFreeSurferSubjectsDirectoryChanged: {}".format(freesurfer_subjects_path))
    # Check directory existence before accepting it.
    directory = qt.QDir(freesurfer_subjects_path)
    if not directory.exists():
        logging.error("Directory {} does not exist.".format(directory))
        return
    # Persist the path and mirror it in the line edit.
    self.freesurfer_subjects_path = freesurfer_subjects_path
    self.InputFreeSurferSubjectsFolderNameLineEdit.text = freesurfer_subjects_path
    # Re-scan subjects/segments for the file currently selected in the combo.
    selected_file = self.InputFreeSurferFileSelection.currentText
    if selected_file != '':
        self.onFreeSurferFileSelectionChanged(selected_file)
def onFreeSurferFileSelectionChanged(self, file_name):
    """
    Rebuild the FreeSurfer subject and segment tables for *file_name*
    (a key of self.freesurferFilesOfInterest). Subjects providing the
    file are listed; the segments table is filled from the last found
    file's available label ids.
    """
    self.filteredFilePathsList = []
    self.initFreeSurferSubjectsTable()
    self.initFreeSurferSegmentsTable()
    if file_name == "":
        return
    try:
        subjects_path = self.freesurfer_subjects_path
        file_path = self.freesurferFilesOfInterest[file_name]
    except (AttributeError, KeyError):
        # Subjects dir not chosen yet, or unknown file key: nothing to list.
        return
    # FIX: template_path was previously only assigned inside the loop, so a
    # directory with no matching file raised NameError below.
    template_path = None
    for subject_name in os.listdir(subjects_path):
        subject_path = os.path.join(subjects_path, subject_name)
        if os.path.isdir(subject_path):
            abs_path = os.path.join(subject_path, file_path)
            if os.path.isfile(abs_path):
                template_path = abs_path
                self.addRowToFreeSurferSubjectsTable(subject_name, abs_path)
    if template_path is None:
        # No subject provides this file; leave both tables empty.
        return
    label_ids = self.logic.getFreeSurferAvailableSegmentIds(template_path)
    self.logic.freesurfer_wanted_segments = []
    # Populate segments selection table.
    for label_id in label_ids:
        segment_name = self.logic.freesurfer_lut_dict[label_id]['name']
        self.addRowToFreeSurferSegmentsTable(segment_name, label_id)
def onToggleFreeSurferSubjectSelection(self, path):
    """Toggle *path* in/out of the pending import list; a manual toggle also
    clears the 'import all subjects' checkbox (signals suppressed)."""
    if not self.onStateChangedFreeSurferImportAllSubjectsOption_is_running:
        box = self.FreeSurferImportAllSubjectsOption
        box.blockSignals(True)
        box.setChecked(False)
        box.blockSignals(False)
    if path in self.filteredFilePathsList:
        self.filteredFilePathsList.remove(path)
    else:
        self.filteredFilePathsList.append(path)
def onStateChangedFreeSurferImportAllSubjectsOption(self):
    """Propagate the 'import all subjects' checkbox to every row checkbox.
    The _is_running flag stops the per-row toggles from clearing the box."""
    self.onStateChangedFreeSurferImportAllSubjectsOption_is_running = True
    table = self.InputFreeSurferSubjectsTable
    for row in range(table.rowCount):
        # children()[1] is the row checkbox inside its container widget.
        table.cellWidget(row, 0).children()[1].setChecked(
            self.FreeSurferImportAllSubjectsOption.isChecked())
    self.onStateChangedFreeSurferImportAllSubjectsOption_is_running = False
def onToggleFreeSurferSegmentSelection(self, label_id):
    """Toggle 'Label_<label_id>' in logic.freesurfer_wanted_segments; a manual
    toggle also clears the 'import all segments' checkbox (signals suppressed)."""
    if not self.onStateChangedFreeSurferImportAllSegmentsOption_is_running:
        box = self.FreeSurferImportAllSegmentsOption
        box.blockSignals(True)
        box.setChecked(False)
        box.blockSignals(False)
    key = 'Label_' + label_id
    wanted = self.logic.freesurfer_wanted_segments
    if key in wanted:
        wanted.remove(key)
    else:
        wanted.append(key)
def onStateChangedFreeSurferImportAllSegmentsOption(self):
    """Propagate the 'import all segments' checkbox to every row checkbox.
    The _is_running flag stops the per-row toggles from clearing the box."""
    self.onStateChangedFreeSurferImportAllSegmentsOption_is_running = True
    table = self.InputFreeSurferSegmentsTable
    for row in range(table.rowCount):
        # children()[1] is the row checkbox inside its container widget.
        table.cellWidget(row, 0).children()[1].setChecked(
            self.FreeSurferImportAllSegmentsOption.isChecked())
    self.onStateChangedFreeSurferImportAllSegmentsOption_is_running = False
# Events to keep the color-table combo boxes in sync with scene node add/remove
def registerCallbacks(self):
    """Observe MRML scene node add/remove events (color-table bookkeeping)."""
    scene = slicer.mrmlScene
    self.nodeAddedModifiedObserverTag = scene.AddObserver(
        slicer.vtkMRMLScene.NodeAddedEvent, self.onMRMLNodeAddedEvent)
    self.nodeAboutToBeRemovedModifiedObserverTag = scene.AddObserver(
        slicer.vtkMRMLScene.NodeAboutToBeRemovedEvent, self.onMRMLNodeAboutToBeRemovedEvent)
def unregisterCallbacks(self):
    """Remove the scene observers installed by registerCallbacks."""
    for tag in (self.nodeAddedModifiedObserverTag,
                self.nodeAboutToBeRemovedModifiedObserverTag):
        slicer.mrmlScene.RemoveObserver(tag)
@vtk.calldata_type(vtk.VTK_OBJECT)
def onMRMLNodeAddedEvent(self, caller, eventId, callData):
    """Track a newly added color-table node in the dict and both combo boxes."""
    if callData.GetClassName() != 'vtkMRMLColorTableNode':
        return
    name = callData.GetName()
    self.color_table_dict[name] = callData.GetID()
    self.InputFolderColorTableSelection.addItem(name)
    self.InputCSVColorTableSelection.addItem(name)
@vtk.calldata_type(vtk.VTK_OBJECT)
def onMRMLNodeAboutToBeRemovedEvent(self, caller, eventId, callData):
    """Drop a color-table node being removed from the dict and both combo boxes."""
    if callData.GetClassName() != 'vtkMRMLColorTableNode':
        return
    name = callData.GetName()
    self.color_table_dict.pop(name)
    for combo in (self.InputFolderColorTableSelection, self.InputCSVColorTableSelection):
        combo.removeItem(combo.findText(name))
#
# Handle request to import data
#
def onColorTableSelectionChanged(self, color_table_name):
    """
    Keep both color-table combo boxes showing *color_table_name* and push
    the matching node id (or the literal 'None') to the logic.
    """
    def _sync_combo(combo):
        # Mirror the selection without re-triggering this handler.
        index = combo.findText(color_table_name, qt.Qt.MatchFixedString)
        if index >= 0:
            combo.blockSignals(True)
            combo.setCurrentIndex(index)
            combo.blockSignals(False)
    # Deduplicated: the original repeated this block for each combo box.
    _sync_combo(self.InputFolderColorTableSelection)
    _sync_combo(self.InputCSVColorTableSelection)
    if color_table_name == 'None':
        self.logic.setColorTableId(color_table_name)
    else:
        self.logic.setColorTableId(self.color_table_dict[color_table_name])
def onFileTypeSelectionChanged(self, file_type):
    """Translate the UI file-type label into the logic's expected-file-type token."""
    type_map = {
        'Volume File': 'VolumeFile',
        'Segmentation File': 'SegmentationFile',
        'Model File': 'ModelFile',
    }
    self.logic.setExpectedFileType(type_map.get(file_type, 'None'))
def onClickImportButton(self):
    """
    Import the currently assembled file list. If a previous import exists,
    ask the user for confirmation and wipe the old data first.
    """
    if not self.filteredFilePathsList:
        logging.warning('List of files is empty, choose a folder or a csv file to import first.')
        return
    # A non-empty segmentationDict means data was imported before.
    if len(self.logic.segmentationDict) != 0:
        logging.warning('Importing new data will delete the previous import')
        # Modal yes/no dialog; blocks until the user answers.
        if slicer.util.confirmYesNoDisplay('Importing new data will delete the previous import,\ndo you want to import anyway?', windowTitle=None):
            self.logic.cleanup()
            self.resetSubjectsTable()
            self.resetSegmentsTable()
        else:
            logging.info("import aborted")
            return
    self.importFiles(self.filteredFilePathsList)
def filterFilePaths(self, filePathsList):
    """
    Return the subset of *filePathsList* whose Slicer-detected file type
    is one this module can import (volume, segmentation, or model).
    """
    readable_types = ('VolumeFile', 'SegmentationFile', 'ModelFile')
    return [path for path in filePathsList
            if slicer.app.ioManager().fileType(path) in readable_types]
def onClickCSVBrowseFilePushButton(self):
    """Ask the user for a CSV file and load its file paths as the import list."""
    csvFileName = qt.QFileDialog.getOpenFileName(self.widget, "Open CSV File", ".", "CSV Files (*.csv)")
    self.InputCSVFileNameLineEdit.text = csvFileName
    self.filteredFilePathsList = self.filterFilePaths(
        self.logic.filePathsFromCSVFile(csvFileName))
def onDirectoryChanged(self, directoryPath):
    """
    Populates self.inputPath and self.filteredFilePathsList with the
    module-readable files found in *directoryPath*.
    """
    logging.debug("onDirectoryChanged: {}".format(directoryPath))
    directory = qt.QDir(directoryPath)
    if not directory.exists():
        logging.error("Directory {} does not exist.".format(directory))
        return
    self.inputPath = directoryPath
    # List readable files, then keep only types Slicer can load for us.
    fileNameList = directory.entryList(qt.QDir.Files | qt.QDir.Readable)
    filePathsList = [os.path.join(directoryPath, name) for name in fileNameList]
    self.filteredFilePathsList = self.filterFilePaths(filePathsList)
def onSubjectsTableWidgetCellClicked(self, row, column):
    """
    On click in the subjects table, refresh the segments table for the
    current selection and optionally display the selected indexes.
    """
    if self.SubjectsTableWidget.rowCount:
        self.populateSegmentsTableWithCurrentSubjectsSelection()
        if self.displayOnClick:
            self.displaySelectedIndexes()
def onSegmentsTableWidgetCellClicked(self, row, column):
    """
    On click in the segments table, optionally display the selected indexes.
    """
    if self.SegmentsTableWidget.rowCount and self.displayOnClick:
        self.displaySelectedIndexes()
def onSegmentTableWidgetComboBoxCurrentIndexChanged(self, index, name):
    """Record the user's expected topology for segment *name* and refresh
    the consistency column of the subjects table."""
    newTopology = self.logic.indexToTopologyType[index]
    logging.debug("SegmentTableWidgetComboBox changed. index: {}, name: {}, newTopology: {}.".format(index, name, newTopology))
    self.logic.expectedTopologiesBySegment[name] = newTopology
    self.updateSubjectsTableConsistencyColumn()
def onSaveCleanDataCheckBoxToggled(self):
    """Forward the 'save clean data' checkbox state to the logic."""
    self.logic.setSaveCleanData(self.ui.SaveCleanDataCheckBox.isChecked())
def onDisplayOnClickCheckBoxToggled(self):
    """Cache the 'display on click' checkbox state as an instance flag."""
    self.displayOnClick = self.ui.DisplayOnClickCheckBox.isChecked()
def onClickDisplaySelectedPushButton(self):
    """Display the currently selected subjects/segments in the 3D view."""
    self.displaySelectedIndexes()
'''
Supplemental functions to update the visualizations
'''
def center3dView(self):
    """Reset the first 3D view's focal point so visible data is centered."""
    layoutManager = slicer.app.layoutManager()
    layoutManager.threeDWidget(0).threeDView().resetFocalPoint()
def setVisibilitySegmentations(self, visibility):
    """
    Set node-level and per-segment visibility on every imported
    segmentation node.

    visibility -- boolean applied to each node's display node.
    """
    # Iterate the dict values directly; the original built a throwaway
    # list copy with an identity comprehension first.
    for node in self.logic.segmentationDict.values():
        displayNode = node.GetDisplayNode()
        displayNode.SetVisibility(visibility)
        displayNode.SetAllSegmentsVisibility(visibility)
def hideAllSegmentations(self):
    """Convenience wrapper: hide every imported segmentation."""
    self.setVisibilitySegmentations(False)
def displaySelectedIndexes(self):
    """
    Show in the 3D view the segmentations/segments currently selected in
    the subjects and segments tables, hiding everything else first.
    """
    # Disable sorting while reading row indexes; restored at the end.
    self.SubjectsTableWidget.setSortingEnabled(False)
    self.SegmentsTableWidget.setSortingEnabled(False)
    self.hideAllSegmentations()
    # Get selection of both tables
    rowsSubjects = self.getRowsFromSelectedIndexes(self.SubjectsTableWidget)
    rowsSegments = self.getRowsFromSelectedIndexes(self.SegmentsTableWidget)
    countSubjects = len(rowsSubjects)
    countSegments = len(rowsSegments)
    if not countSubjects and not countSegments:
        # NOTE(review): this branch is a no-op; possibly an early return was
        # intended when nothing is selected — confirm before changing.
        pass
    # Update column indexes (sanity)
    # A 4-column segments table includes a leading subject-name column
    # (multi-subject mode); otherwise the segment name is in column 0.
    segmentsColumnCount = self.SegmentsTableWidget.columnCount
    hasSegmentsColumnSubjectName = True if segmentsColumnCount == 4 else False
    if hasSegmentsColumnSubjectName:
        self.segmentsColumnSubjectName = 0
        self.segmentsColumnSegmentName = 1
        self.segmentsColumnTopologyCurrent = 2
        self.segmentsColumnTopologyExpected = 3
    else:
        self.segmentsColumnSubjectName = -1
        self.segmentsColumnSegmentName = 0
        self.segmentsColumnTopologyCurrent = 1
        self.segmentsColumnTopologyExpected = 2
    # segmentationNodes = list()
    # segmentationNodes.append(node)
    for row in rowsSubjects:
        subjectName = self.SubjectsTableWidget.item(row, self.subjectsColumnName).text()
        node = self.logic.segmentationDict[subjectName]
        segmentationDisplayNode = node.GetDisplayNode()
        segmentationDisplayNode.SetVisibility(True)
        # With no explicit segment selection, show every segment of the subject.
        if countSegments == 0:
            segmentationDisplayNode.SetAllSegmentsVisibility(True)
            self.center3dView()
    subjectName = None
    for row in rowsSegments:
        if hasSegmentsColumnSubjectName:
            subjectName = self.SegmentsTableWidget.item(row, self.segmentsColumnSubjectName).text()
        else:
            if countSubjects:
                # Single-subject mode: owning subject is the first selected one.
                subjectName = self.SubjectsTableWidget.item(rowsSubjects[0], self.subjectsColumnName).text()
            else:
                continue
        node = self.logic.segmentationDict[subjectName]
        segmentName = self.SegmentsTableWidget.item(row, self.segmentsColumnSegmentName).text()
        segmentId = node.GetSegmentation().GetSegmentIdBySegmentName(segmentName)
        segmentationDisplayNode = node.GetDisplayNode()
        segmentationDisplayNode.SetVisibility(True)
        segmentationDisplayNode.SetSegmentVisibility(segmentId, True)
    self.SubjectsTableWidget.setSortingEnabled(True)
    self.SegmentsTableWidget.setSortingEnabled(True)
#
# DataImporterTest
#
class DataImporterTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
self.testDir = os.path.join(slicer.app.temporaryPath, 'DataImporterTest')
self.downloads = ()
self.casesLabelMap = (
'case01.nrrd',
'case02.nrrd'
)
self.casesSegmentation = (
'case01_allSegments.seg.nrrd',
'case02_allSegments.seg.vtm',
)
self.casesModel = (
'sample_model.vtk',
)
# create dir if non-existant
if not os.path.isdir(self.testDir):
os.mkdir(self.testDir)
# populate self.download()
self.downloadData()
def downloadData(self):
"""
Download data, unzip and populate self.downloads
"""
logging.info("-- Start download")
import urllib
self.downloads = (
('https://data.kitware.com/api/v1/item/5b7c5b758d777f06857c890d/download', 'case01.nrrd', slicer.util.loadLabelVolume),
('https://data.kitware.com/api/v1/item/5b7c5b798d777f06857c8910/download', 'case02.nrrd', slicer.util.loadLabelVolume),
('https://data.kitware.com/api/v1/item/5b7f43eb8d777f06857cb204/download', 'case01_allSegments.seg.nrrd', slicer.util.loadSegmentation),
('https://data.kitware.com/api/v1/item/5b802f178d777f06857cb665/download', 'case02_allSegments.seg.vtm.zip', 'Unzip'),
('https://data.kitware.com/api/v1/item/5b8d65aa8d777f43cc9850f4/download', 'sample_model.vtk', slicer.util.loadModel),
)
for url, name, loader in self.downloads:
filePath = os.path.join(self.testDir, name)
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader == 'Unzip' and not os.path.exists(filePath[:-4]):
slicer.app.applicationLogic().Unzip(filePath, self.testDir)
logging.info("Unzipping done")
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.delayDisplay('Starting...')
self.setUp()
##### LabelMap #####
for fileName in self.casesLabelMap:
self.test_importLabelMapFromFile(fileName)
##### Segmentation #####
for fileName in self.casesSegmentation:
self.test_importSegmentationFromFile(fileName)
##### Model #####
for fileName in self.casesModel:
self.test_importModelFromFile(fileName)
##### CSV #####
self.test_filenamesFromCSVFile()
##### All #####
self.test_importFiles()
##########
self.test_populateDictSegmentNamesWithIntegers()
self.test_computeMode()
self.delayDisplay('All tests passed!')
def printMembers(self, logic):
print('labelMapDict', logic.labelMapDict)
print('segmentationDict', logic.segmentationDict)
print('cohort label range', logic.labelRangeInCohort)
print('topologyDict', logic.topologyDict)
print('inconsistentTopologyDict', logic.inconsistentTopologyDict)
print('polyDataDict', logic.polyDataDict)
def test_importLabelMapFromFile(self, fileName):
"""
Exercises correctness of importLabelMap (single file), plus test methods on LabelMap data:
- populateTopologyDictionary
- checkTopologyConsistency
"""
logging.info('-- Starting test for %s --' % fileName)
filePath = os.path.join(self.testDir, fileName)
logic = DataImporterLogic()
self.assertTrue(logic.importLabelMap(filePath))
logic.populateTopologyDictionary()
logic.populateInconsistentTopologyDict()
self.printMembers(logic)
if fileName == 'case01.nrrd':
self.assertNotEqual(logic.labelMapDict, dict())
self.check_case01(logic, fileName)
elif fileName == 'case02.nrrd':
self.assertNotEqual(logic.labelMapDict, dict())
self.check_case02(logic, fileName)
logging.info('-- Test for %s passed (importLabelMap)! --' % fileName)
def check_case01(self, logic, fileName):
logging.info('-- Checking case01 --')
self.assertTrue('case01' in fileName)
self.assertNotEqual(logic.segmentationDict, dict())
self.assertEqual(logic.labelRangeInCohort, (0, 5))
self.assertNotEqual(logic.topologyDict, dict())
self.assertNotEqual(logic.polyDataDict, dict())
segmentName = "1" # Disk
topologyString = logic.getTopologyString(fileName, segmentName)
self.assertEqual(topologyString, logic.TOPOLOGY_TYPES[logic.TOPOLOGY_STRIP_TYPE])
segmentName = "2" # Sphere
topologyString = logic.getTopologyString(fileName, segmentName)
self.assertEqual(topologyString, logic.TOPOLOGY_TYPES[logic.TOPOLOGY_SPHERE_TYPE])
logging.info('-- case01 passed! --')
def check_case02(self, logic, fileName):
logging.info('-- Checking case02 --')
self.assertTrue('case02' in fileName)
self.assertNotEqual(logic.segmentationDict, dict())
self.assertEqual(logic.labelRangeInCohort, (0, 5))
self.assertNotEqual(logic.topologyDict, dict())
self.assertNotEqual(logic.polyDataDict, dict())
segmentName = "2" # Sphere
topologyString = logic.getTopologyString(fileName, segmentName)
self.assertEqual(topologyString, logic.TOPOLOGY_TYPES[logic.TOPOLOGY_SPHERE_TYPE])
logging.info('-- case02 passed! --')
def test_importSegmentationFromFile(self, fileName):
"""
Exercises correctness of importSegmentation (single file), plus test methods on LabelMap data:
- populateTopologyDictionary
- checkTopologyConsistency
"""
logging.info('-- Starting segmentation test for %s --' % fileName)
filePath = os.path.join(self.testDir, fileName)
logic = DataImporterLogic()
self.assertTrue(logic.importSegmentation(filePath))
logic.populateTopologyDictionary()
logic.populateInconsistentTopologyDict()
self.printMembers(logic)
if fileName == 'case01_allSegments.seg.nrrd':
self.check_case01(logic, fileName)
elif fileName == 'case02_allSegments.seg.vtm':
self.check_case02(logic, fileName)
logging.info('-- Test for %s passed (importSegmentation)! --' % fileName)
def test_importModelFromFile(self, fileName):
logging.info('-- Starting model test for %s --' % fileName)
filePath = os.path.join(self.testDir, fileName)
logic = DataImporterLogic()
self.assertTrue(logic.importModel(filePath))
logic.populateTopologyDictionary()
logic.populateInconsistentTopologyDict()
self.printMembers(logic)
self.assertNotEqual(logic.modelDict, dict())
self.assertEqual(logic.labelRangeInCohort, (0, 1))
self.assertNotEqual(logic.topologyDict, dict())
self.assertNotEqual(logic.polyDataDict, dict())
# All consistent
self.assertEqual(logic.inconsistentTopologyDict, dict())
segmentName = "1" # Sphere
topologyString, consistentTopologyString = logic.getTopologyAndConsistencyString(fileName, segmentName)
self.assertEqual(topologyString, logic.TOPOLOGY_TYPES[logic.TOPOLOGY_SPHERE_TYPE])
self.assertEqual(consistentTopologyString, 'Consistent')
logging.info('-- Test for %s passed (importModel) ! --' % fileName)
def test_importFiles(self):
"""
Test importing more images from a folder
"""
logging.info('-- Starting test_importFiles --')
self.assertTrue(os.path.isdir(self.testDir))
logic = DataImporterLogic()
# Load one label map and one segmentation
preNumberOfNodesLabelMapVolume = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLLabelMapVolumeNode")
preNumberOfNodesSegmentation = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLSegmentationNode")
filePaths = [os.path.join(self.testDir, self.casesLabelMap[0]),
os.path.join(self.testDir, self.casesSegmentation[1]),
]
logic.importFiles(filePaths)
self.assertEqual(slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLLabelMapVolumeNode"), preNumberOfNodesLabelMapVolume + 1)
self.assertEqual(slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLSegmentationNode"), preNumberOfNodesSegmentation + 2)
# Try to load not existing file
filePaths = [os.path.join(self.testDir, 'not_existing_for_sure.nrrd'), ]
self.assertRaises(TypeError, logic.importFiles, filePaths)
# Try file with different label cohort
numberOfKeys = len(logic.topologyDict.keys())
numberOfModels = len(logic.modelDict.keys())
filePaths = [os.path.join(self.testDir, self.casesModel[0]), ]
logic.importFiles(filePaths)
# Warning to console, file is not loaded, and no member is modified.
self.assertEqual(numberOfKeys, len(logic.topologyDict.keys()))
self.assertEqual(numberOfModels, len(logic.modelDict.keys()))
logging.info('-- test_importFiles passed! --')
def test_populateDictSegmentNamesWithIntegers(self):
logging.info('-- Starting test_populateDictSegmentNamesWithIntegers --')
filePath = os.path.join(self.testDir, self.casesModel[0])
logic = DataImporterLogic()
logic.importFiles([filePath])
logic.populateTopologyDictionary()
logic.populateInconsistentTopologyDict()
logic.populateDictSegmentNamesWithIntegers()
self.assertEqual(len(logic.dictSegmentNamesWithIntegers.keys()), 1)
for name in logic.topologyDict:
for segmentName in logic.topologyDict[name]:
self.assertTrue(segmentName in logic.dictSegmentNamesWithIntegers)
self.assertEqual(logic.dictSegmentNamesWithIntegers[segmentName], 1)
def test_computeMode(self):
    """The per-segment mode is its most frequent value; missing segments give None."""
    # Three cases: segmentName0 is '1' twice and '0' once; segmentName1 the reverse.
    perCaseValues = {
        'name0':
            {'segmentName0': '0', 'segmentName1': '1'},
        'name1':
            {'segmentName0': '1', 'segmentName1': '0'},
        'name2':
            {'segmentName0': '1', 'segmentName1': '0'}
    }
    logic = DataImporterLogic()
    self.assertEqual(logic._computeModeOfSegment(perCaseValues, 'segmentName0'), '1')
    self.assertEqual(logic._computeModeOfSegment(perCaseValues, 'segmentName1'), '0')
    # A segment name that appears in no case has no mode.
    self.assertEqual(logic._computeModeOfSegment(perCaseValues, 'non_existing'), None)
def test_filenamesFromCSVFile(self):
    """filePathsFromCSVFile returns one file path per line of the CSV file."""
    # Write a two-line CSV listing two label-map files.
    csvFilePath = os.path.join(self.testDir, 'filePaths.csv')
    with open(csvFilePath, 'w') as fileCsv:
        fileCsv.write(os.path.join(self.testDir, self.casesLabelMap[0]) + '\n')
        fileCsv.write(os.path.join(self.testDir, self.casesLabelMap[1]))
    logging.info("csvFilePath: {}".format(csvFilePath))
    logic = DataImporterLogic()
    filePaths = logic.filePathsFromCSVFile(csvFilePath)
    logging.info(filePaths)
    # BUG FIX: the original used assertTrue(len(filePaths), 2), which treats 2 as
    # the failure *message* and passes for any non-empty list. assertEqual is the
    # intended assertion.
    self.assertEqual(len(filePaths), 2)
    self.assertTrue(self.casesLabelMap[0] in filePaths[0])
    self.assertTrue(self.casesLabelMap[1] in filePaths[1])
| 41.343881 | 164 | 0.741075 |
7af7a406c9e4df5c354347027aaebf36eb17e682 | 1,471 | py | Python | pex/vendor/_vendored/setuptools/setuptools/py33compat.py | hbmartin/pex | a4c5d96e16dac892a6d84b02bdb3c0b8e14e9e1b | [
"Apache-2.0"
] | null | null | null | pex/vendor/_vendored/setuptools/setuptools/py33compat.py | hbmartin/pex | a4c5d96e16dac892a6d84b02bdb3c0b8e14e9e1b | [
"Apache-2.0"
] | 1 | 2020-03-02T14:52:32.000Z | 2020-03-02T14:52:32.000Z | pex/vendor/_vendored/setuptools/setuptools/py33compat.py | hbmartin/pex | a4c5d96e16dac892a6d84b02bdb3c0b8e14e9e1b | [
"Apache-2.0"
] | null | null | null | import dis
import array
import collections
try:
import html
except ImportError:
html = None
if "__PEX_UNVENDORED__" in __import__("os").environ:
from setuptools.extern import six # vendor:skip
else:
from pex.third_party.setuptools.extern import six
if "__PEX_UNVENDORED__" in __import__("os").environ:
from setuptools.extern.six.moves import html_parser # vendor:skip
else:
from pex.third_party.setuptools.extern.six.moves import html_parser
# Force new-style classes under Python 2; harmless no-op on Python 3.
__metaclass__ = type
# Lightweight record pairing a bytecode opcode with its decoded argument
# (arg is None for opcodes that take no argument).
OpArg = collections.namedtuple('OpArg', 'opcode arg')
class Bytecode_compat:
    """Fallback for ``dis.Bytecode`` on interpreters that lack it.

    Wraps a code object and iterates over its raw bytecode, yielding one
    ``OpArg(opcode, arg)`` per instruction.  The decoding assumes the
    pre-wordcode bytecode layout, where an opcode at or above
    ``dis.HAVE_ARGUMENT`` is followed by a two-byte little-endian argument.
    """

    def __init__(self, code):
        # Code object whose co_code byte string will be decoded.
        self.code = code

    def __iter__(self):
        """Yield '(op,arg)' pair for each operation in code object 'code'"""

        # NOTE(review): typecode 'b' is *signed*, so byte values >= 128 come
        # back negative and would fail the HAVE_ARGUMENT comparison below --
        # presumably acceptable for the opcodes this shim is used on; confirm.
        bytes = array.array('b', self.code.co_code)
        eof = len(self.code.co_code)

        ptr = 0
        extended_arg = 0

        while ptr < eof:

            op = bytes[ptr]

            if op >= dis.HAVE_ARGUMENT:

                # Two-byte little-endian argument plus any EXTENDED_ARG prefix
                # accumulated on a previous iteration.
                arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
                ptr += 3

                if op == dis.EXTENDED_ARG:
                    # Fold this prefix into the next instruction's argument
                    # instead of yielding it; long_type keeps the multiply in
                    # arbitrary precision on Python 2.
                    long_type = six.integer_types[-1]
                    extended_arg = arg * long_type(65536)
                    continue

            else:
                arg = None
                ptr += 1

            yield OpArg(op, arg)
# Prefer the stdlib implementations when they exist; otherwise fall back to
# the compatibility shims defined in this module.
Bytecode = getattr(dis, 'Bytecode', Bytecode_compat)
unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
| 22.984375 | 76 | 0.604351 |
1750235e93ac5a5eec414a9f7d0bf1b2c14d62db | 5,737 | py | Python | data/utils/sample.py | slowbull/leaf | a2eda2b551fb0db8ddf88ae8c9e60adf965c7e85 | [
"BSD-2-Clause"
] | 1 | 2020-01-03T03:45:14.000Z | 2020-01-03T03:45:14.000Z | data/utils/sample.py | slowbull/leaf | a2eda2b551fb0db8ddf88ae8c9e60adf965c7e85 | [
"BSD-2-Clause"
] | null | null | null | data/utils/sample.py | slowbull/leaf | a2eda2b551fb0db8ddf88ae8c9e60adf965c7e85 | [
"BSD-2-Clause"
] | null | null | null | '''
samples from all raw data;
by default samples in a non-iid manner; namely, randomly selects users from
raw data until their cumulative amount of data exceeds the given number of
datapoints to sample (specified by --fraction argument);
ordering of original data points is not preserved in sampled data
'''
import argparse
import json
import os
import random
from constants import DATASETS
from util import iid_divide
# Command-line interface for the sampler.
parser = argparse.ArgumentParser()

parser.add_argument('--name',
                help='name of dataset to parse; default: sent140;',
                type=str,
                choices=DATASETS,
                default='sent140')
# --iid and --niid are two switches for the same boolean destination.
parser.add_argument('--iid',
                help='sample iid;',
                action="store_true")
parser.add_argument('--niid',
                help="sample niid;",
                dest='iid', action='store_false')
parser.add_argument('--fraction',
                help='fraction of all data to sample; default: 0.1;',
                type=float,
                default=0.1)
parser.add_argument('--u',
                help=('number of users in iid data set; ignored in niid case;'
                      'represented as fraction of original total number of users; '
                      'default: 0.01;'),
                type=float,
                default=0.01)
# Non-iid sampling is the default unless --iid is passed.
parser.set_defaults(iid=False)

args = parser.parse_args()

print('------------------------------')
print('sampling data')
# Locate <repo>/<dataset>/data/all_data and collect the raw JSON shards.
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(parent_path, args.name, 'data')
subdir = os.path.join(data_dir, 'all_data')
files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]

new_user_count = 0  # for iid case: running counter used to name synthetic users
for f in files:
    file_dir = os.path.join(subdir, f)
    with open(file_dir, 'r') as inf:
        data = json.load(inf)

    num_users = len(data['users'])

    # Target number of datapoints to keep from this shard.
    tot_num_samples = sum(data['num_samples'])
    num_new_samples = int(args.fraction * tot_num_samples)

    hierarchies = None

    if(args.iid):
        # IID: pool all samples from every user, draw a random subset, and
        # redistribute it evenly across num_new_users synthetic users.
        raw_list = list(data['user_data'].values())
        raw_x = [elem['x'] for elem in raw_list]
        raw_y = [elem['y'] for elem in raw_list]
        x_list = [item for sublist in raw_x for item in sublist]  # flatten raw_x
        y_list = [item for sublist in raw_y for item in sublist]  # flatten raw_y

        num_new_users = int(round(args.u * num_users))
        if num_new_users == 0:
            # Always create at least one user.
            num_new_users += 1

        indices = [i for i in range(tot_num_samples)]
        new_indices = random.sample(indices, num_new_samples)
        # TODO: seed this random

        # Synthetic user ids continue across shards via new_user_count.
        users = [str(i+new_user_count) for i in range(num_new_users)]

        user_data = {}
        for user in users:
            user_data[user] = {'x': [], 'y': []}
        all_x_samples = [x_list[i] for i in new_indices]
        all_y_samples = [y_list[i] for i in new_indices]
        x_groups = iid_divide(all_x_samples, num_new_users)
        y_groups = iid_divide(all_y_samples, num_new_users)
        for i in range(num_new_users):
            user_data[users[i]]['x'] = x_groups[i]
            user_data[users[i]]['y'] = y_groups[i]

        num_samples = [len(user_data[u]['y']) for u in users]

        new_user_count += num_new_users
    else:
        # Non-IID: take whole users in random order until the cumulative
        # sample count reaches num_new_samples; the last user may be truncated.
        ctot_num_samples = 0

        users = data['users']
        users_and_hiers = None
        if 'hierarchies' in data:
            # Keep each user paired with its hierarchy while shuffling.
            users_and_hiers = list(zip(users, data['hierarchies']))
            random.shuffle(users_and_hiers)
        else:
            random.shuffle(users)
        user_i = 0
        num_samples = []
        user_data = {}
        if 'hierarchies' in data:
            hierarchies = []

        while(ctot_num_samples < num_new_samples):
            hierarchy = None
            if users_and_hiers is not None:
                user, hier = users_and_hiers[user_i]
            else:
                user = users[user_i]

            cdata = data['user_data'][user]

            cnum_samples = len(data['user_data'][user]['y'])

            if (ctot_num_samples + cnum_samples > num_new_samples):
                # Truncate the final user so the total hits the target exactly.
                # NOTE(review): this keeps the *first* cnum_samples datapoints
                # of the user (in shuffled order), not a random subset of all
                # of them -- confirm that is the intent.
                cnum_samples = num_new_samples - ctot_num_samples
                indices = [i for i in range(cnum_samples)]
                new_indices = random.sample(indices, cnum_samples)
                x = []
                y = []
                for i in new_indices:
                    x.append(data['user_data'][user]['x'][i])
                    y.append(data['user_data'][user]['y'][i])
                cdata = {'x': x, 'y': y}

            if 'hierarchies' in data:
                hierarchies.append(hier)

            num_samples.append(cnum_samples)
            user_data[user] = cdata

            ctot_num_samples += cnum_samples
            user_i += 1

        # Keep only the users actually consumed by the loop above.
        if 'hierarchies' in data:
            users = [u for u, h in users_and_hiers][:user_i]
        else:
            users = users[:user_i]

    # ------------
    # create .json file

    all_data = {}
    all_data['users'] = users
    if hierarchies is not None:
        all_data['hierarchies'] = hierarchies
    all_data['num_samples'] = num_samples
    all_data['user_data'] = user_data

    # Output name encodes the sampling mode and the fraction (and, for iid,
    # the user fraction), with the leading "0." stripped from the floats.
    slabel = ''
    if(args.iid):
        slabel = 'iid'
    else:
        slabel = 'niid'

    arg_frac = str(args.fraction)
    arg_frac = arg_frac[2:]
    arg_nu = str(args.u)
    arg_nu = arg_nu[2:]
    arg_label = arg_frac
    if(args.iid):
        arg_label = '%s_%s' % (arg_nu, arg_label)
    file_name = '%s_%s_%s.json' % ((f[:-5]), slabel, arg_label)
    ouf_dir = os.path.join(data_dir, 'sampled_data', file_name)

    print('writing %s' % file_name)
    with open(ouf_dir, 'w') as outfile:
        json.dump(all_data, outfile)
| 31.696133 | 83 | 0.576085 |
64e569584424cd25d27c242c42579806703fa7de | 6,893 | py | Python | scripts/summary.py | DanielKowalczyk1984/GSBPA | 2aa35c3d23ec02bf503738772c93ea5f8bcb339d | [
"MIT"
] | null | null | null | scripts/summary.py | DanielKowalczyk1984/GSBPA | 2aa35c3d23ec02bf503738772c93ea5f8bcb339d | [
"MIT"
] | null | null | null | scripts/summary.py | DanielKowalczyk1984/GSBPA | 2aa35c3d23ec02bf503738772c93ea5f8bcb339d | [
"MIT"
] | null | null | null |
# %%
import re
import pandas as pd
import numpy as np
import os
import sys
from shutil import copy, copyfile
from pathlib import Path
from pandas.core import groupby
from tikzplotlib import save as tikz_save
import matplotlib.pyplot as plt
workdir = Path.cwd().parent
results = workdir.joinpath(Path("./results"))
# %% Load the data of the new results
file_name = "~/results_2021_12_10.csv"
file_path = Path(file_name)
data = pd.read_csv(file_name, index_col=False)
# Pull the run date (year, month, day) out of the CSV file name.
match = re.search(r'(CG_overall|results)\_(20\d{2})\_(\d{2})\_(\d{2})\.csv', file_name)
year = match.group(2)
month = match.group(3)
day = match.group(4)
# %% Calculate some extra columns
# Relative optimality gap; the small epsilon guards against division by zero.
data['gap'] = (data['global_upper_bound'] - data['global_lower_bound']
               )/(data['global_lower_bound'] + 0.00001)
data['opt'] = data['global_lower_bound'] == data['global_upper_bound']
# Fraction of the initial graph removed by reduced-cost fixing.
data['reduction'] = (data['first_size_graph'] -
                     data['size_after_reduced_cost'])/(data['first_size_graph'] + 0.000001)
# Instance number parsed from the trailing digits of the instance name.
data['Inst'] = data.NameInstance.apply(
    lambda x: int(re.search(r'.*\_(\d+)', x).group(1)))
# NOTE(review): rescales columns 3..11 by a hard-coded factor of 0.7 --
# presumably a CPU-speed normalization for the timing columns; confirm.
for it in data.columns[3:12]:
    data[it] = 0.7*data[it]
# %% create result directory and copy results to that directory
results_path = results.joinpath("./results_{}_{}_{}".format(year, month, day))
if results_path.exists() == False:
    os.mkdir(results_path)
# copy(file_path, results_path.joinpath(match.group(0)))
tex_file = str()
# %% Create tex files for Column generation results
# Copy every template file into the dated results directory; the main table
# template is renamed with the run date and remembered in tex_file.
template_dir_path = results.joinpath("./template_dir")
for lst in template_dir_path.iterdir():
    if lst.name == "CG_tables_template.tex":
        copy(lst, results_path.joinpath(
            "CG_tables_{}_{}_{}.tex".format(year, month, day)))
        tex_file = str(results_path.joinpath(
            "CG_tables_{}_{}_{}.tex".format(year, month, day)))
    else:
        copy(lst, results_path.joinpath(lst.name))
# Rewrite the hard-coded CSV names inside the copied .tex file to the dated
# names, using the external 'sd' search-and-replace tool.
# NOTE(review): os.popen ignores the exit status, so failures pass silently.
os.popen("sd \"CG_summary_20191024.csv\" \"CG_summary_{}_{}_{}.csv\" ".format(
    year, month, day)+tex_file)
os.popen("sd \"CG_allinstances_20191024.csv\" \"CG_allinstances_{}_{}_{}.csv\" ".format(
    year, month, day)+tex_file)
# %% Compute summary results for CG over all solvers
# Per-(solver, n, m) aggregates, then pivoted so each solver becomes a column.
summary_grouped = data.groupby(['pricing_solver', 'n', 'm'])
aggregation = {"tot_lb": {np.max, np.mean},
               "gap": {np.max, np.mean},
               "first_size_graph": {np.max, np.mean},
               "opt": np.sum,
               "reduction": {np.max, np.mean},
               "tot_cputime": {np.max, np.mean}}
summary_write = summary_grouped.agg(aggregation).pivot_table(index=['n', 'm'], values=[
    'tot_lb', 'gap', 'first_size_graph', 'reduction', 'opt'], columns=['pricing_solver'])
print(summary_write.columns)
# Rename the solver level and flatten the MultiIndex to underscore-joined names.
summary_write.columns.set_levels(
    ['AFBC', 'TI_BDD'], level=2, inplace=True)
summary_write.columns = ["_".join(x) for x in summary_write.columns.ravel()]
# BUG FIX: the original passed "/CG_summary_..." (leading slash) to joinpath,
# which makes the component absolute and discards results_path entirely, so
# the CSV was written to the filesystem root instead of the dated results
# directory (compare the CG_allinstances write below).
summary_write.to_csv(results_path.joinpath(
    "CG_summary_{}_{}_{}.csv".format(year, month, day)))
# %% pivot results for all pricing_solvers
# One row per (n, m, Inst) with every metric broken out per pricing solver.
all_instances = data.pivot_table(values=['tot_lb', 'gap', 'first_size_graph', 'reduction', 'opt', 'rel_error', 'nb_generated_col',
                                         'global_lower_bound', 'global_upper_bound', 'tot_cputime', 'tot_bb'], index=['n', 'm', 'Inst'], columns=['pricing_solver'])
all_instances.columns.set_levels(
    ['AFBC'], level=1, inplace=True)
# Flatten the (metric, solver) MultiIndex into underscore-joined column names.
all_instances.columns = ["_".join(x) for x in all_instances.columns.ravel()]
all_instances.to_csv(
    results_path.joinpath(
        "CG_allinstances_{}_{}_{}.csv".format(year, month, day)))
# %% Load results of Pessoa et al. and Oliveira and Pessoa
df_pessoa = pd.read_csv(results.joinpath("all_pessoa.csv"))
df_pessoa.Opt = df_pessoa.Opt.apply(str)
# Strip a possible single non-digit prefix from 'Opt' to get the numeric best value.
df_pessoa['best'] = df_pessoa.apply(lambda x: re.search(
    r'[^0-9]?(\d+)', x['Opt']).group(1), axis=1)
df_pessoa.best = df_pessoa.best.apply(pd.to_numeric)
df_oliveira = pd.read_csv(results.joinpath("oliveira_overall.csv"))
# %% Merge our results with results of Oliveira
# Inner join on instance identity (instance number, jobs n, machines m).
df_all = pd.merge(data, df_oliveira, on=['Inst', 'n', 'm'])
# %% Compute overall performance profile curve
# Per instance: each solver's time divided by the best (fastest) of the two.
df_all['best_solver'] = df_all[['tot_bb', 'TimeOliveira']].min(axis=1)
df_all['ratio_tot_bb_best'] = df_all['tot_bb'] / df_all['best_solver']
df_all['ratio_TimeOliveira_best'] = df_all['TimeOliveira'] / \
    df_all['best_solver']
# Empirical CDF of the ratios: x = ratio threshold tau, y = fraction of
# instances solved within tau times the best solver's time.
sorted_ratio_tot_bb = df_all[['ratio_tot_bb_best']
                             ].sort_values(by='ratio_tot_bb_best')
yvals = np.arange(len(sorted_ratio_tot_bb)) / \
    float(len(sorted_ratio_tot_bb) - 1.0)
sorted_ratio_TimeOliveira = df_all[['ratio_TimeOliveira_best']].sort_values(
    by='ratio_TimeOliveira_best')
yvalues = np.arange(len(sorted_ratio_TimeOliveira)) / \
    float(len(sorted_ratio_TimeOliveira) - 1.0)
width, height = plt.figaspect(1.68)
fig, ax = plt.subplots(figsize=(width, height), dpi=200)
ax.step(sorted_ratio_tot_bb, yvals, label='BDD')
ax.step(sorted_ratio_TimeOliveira, yvalues, label='ATIF')
ax.set_xlim([10**0, 100])
# ax.set_title(
#     r"Performance profile for instances with $m = %d$ and $n = %d$"
#     % (i, j))
ax.set_xlabel(r"$\tau$")
ax.set_ylabel(r"$P(r_{p,s} \leq \tau)$")
ax.legend(loc='lower right')
# plt.savefig('profile_curve%d_%d.pdf' % (i, j), dpi=200)
# Export both a TikZ (.tex) and a PDF version of the figure.
name_file = 'profile_curve_overall_{}_{}_{}.tex'.format(year, month, day)
tikz_save(results_path.joinpath(name_file))
plt.savefig(results_path.joinpath('profile_curve_overall_{}_{}_{}.pdf'.format(
    year, month, day)), dpi=200)
# %% Compute performance profile curves per instance class
# One performance-profile plot for each (n, m) instance class.
for n in [40, 50]:
    for m in [2, 4]:
        # Empirical CDFs of the time ratios restricted to this class.
        sorted_ratio_tot_bb = df_all.loc[(df_all['n'] == n) & (
            df_all["m"] == m), "ratio_tot_bb_best"].sort_values()
        yvals = np.arange(len(sorted_ratio_tot_bb)) / \
            float(len(sorted_ratio_tot_bb) - 1.0)
        sorted_ratio_TimeOliveira = df_all.loc[(df_all['n'] == n) & (
            df_all["m"] == m), "ratio_TimeOliveira_best"].sort_values()
        yvalues = np.arange(len(sorted_ratio_TimeOliveira)) / \
            float(len(sorted_ratio_TimeOliveira) - 1.0)
        width, height = plt.figaspect(1.68)
        fig, ax = plt.subplots(figsize=(width, height), dpi=200)
        ax.step(sorted_ratio_tot_bb, yvals, label='BDD')
        ax.step(sorted_ratio_TimeOliveira, yvalues, label='ATIF')
        ax.set_xlim([10**0, 100])
        # BUG FIX: the original format string read "$n = $" with no second
        # placeholder, so the value of n was silently dropped from the title
        # (str.format ignores surplus arguments).
        ax.set_title(
            "Performance profile for instances with $m = {}$ and $n = {}$".format(m, n))
        ax.set_xlabel(r"$\tau$")
        ax.set_ylabel(r"$P(r_{p,s} \leq \tau)$")
        ax.legend(loc='lower right')
        # NOTE(review): the .tex name keeps the 'overall' prefix even though
        # this is a per-class plot, while the .pdf below uses 'profile_curve_';
        # kept as-is to avoid changing output file names -- confirm intent.
        name_file = 'profile_curve_overall_{}_{}_{}_{}_{}.tex'.format(
            n, m, year, month, day)
        tikz_save(results_path.joinpath(name_file))
        plt.savefig(results_path.joinpath('profile_curve_{}_{}_{}_{}_{}.pdf'.format(
            n, m, year, month, day)), dpi=200)
6bd7740e334a6023307f7fdb56f01a7ad1992a01 | 362 | py | Python | src/default_documents/migrations/0033_auto_20160203_1238.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | 2 | 2021-09-10T19:40:30.000Z | 2022-01-31T07:15:51.000Z | src/default_documents/migrations/0033_auto_20160203_1238.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | null | null | null | src/default_documents/migrations/0033_auto_20160203_1238.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | 1 | 2021-09-10T19:40:42.000Z | 2021-09-10T19:40:42.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('default_documents', '0032_auto_20160203_1238'),
]
operations = [
migrations.RenameField(
model_name='correspondence',
old_name='contract_number',
new_name='contract_number_old',
),
]
| 21.294118 | 57 | 0.618785 |
30d6634844f68eb209fa5572bc3eb54d8bffb9fc | 7,326 | py | Python | kur/containers/layers/pooling.py | ZacharyJacobCollins/Learning_Kur | ecb28d38437e9162ae465de25a143642ce16d1b6 | [
"Apache-2.0"
] | 1 | 2017-03-30T21:42:33.000Z | 2017-03-30T21:42:33.000Z | kur/containers/layers/pooling.py | ZacharyJacobCollins/Learning_Kur | ecb28d38437e9162ae465de25a143642ce16d1b6 | [
"Apache-2.0"
] | null | null | null | kur/containers/layers/pooling.py | ZacharyJacobCollins/Learning_Kur | ecb28d38437e9162ae465de25a143642ce16d1b6 | [
"Apache-2.0"
] | 1 | 2019-11-05T17:11:27.000Z | 2019-11-05T17:11:27.000Z | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import Layer, ParsingError
###############################################################################
class Pooling(Layer): # pylint: disable=too-few-public-methods
""" A pooling layer
# Properties
size: int or list of ints (required). The size of each kernel. If an
integer is used, it is interpretted as a one-dimensional
convolution, the same as if it were put into a length-1 list.
strides: int or list of ints (optional; default: 1 in each dimension).
The stride (subsampling rate) between convolutions. If a list, it
must be the same length as `size` and specify the stride in each
respective dimension. If a single number, it is used as the stride
in each dimension.
pool: one of (max, average). The pool function to apply.
# Example
```
pool:
size: [2, 2]
strides: 1
type: max
```
"""
POOL_TYPES = ('max', 'average')
###########################################################################
@classmethod
def get_container_name(cls):
""" Returns the name of the container class.
"""
return 'pool'
###########################################################################
def __init__(self, *args, **kwargs):
""" Creates a new pooling layer.
"""
super().__init__(*args, **kwargs)
self.size = None
self.strides = None
self.pooltype = None
###########################################################################
def _parse(self, engine):
""" Parses out the pooling layer.
"""
# Parse self
if isinstance(self.args, dict):
if 'size' not in self.args:
raise ParsingError('Missing key "size" in pooling container.')
self.size = engine.evaluate(self.args['size'], recursive=True)
if 'strides' in self.args:
self.strides = engine.evaluate(self.args['strides'],
recursive=True)
if 'type' in self.args:
self.pooltype = engine.evaluate(self.args['type']).lower()
if self.pooltype not in Pooling.POOL_TYPES:
raise ParsingError('Unknown pool type "{}". Pool type can '
'be one of: {}'.format(
self.pooltype, ', '.join(Pooling.POOL_TYPES)
))
else:
self.size = engine.evaluate(self.args, recursive=True)
if self.pooltype is None:
self.pooltype = Pooling.POOL_TYPES[0]
if not isinstance(self.size, (list, tuple)):
self.size = [self.size]
if not 1 <= len(self.size) <= 3:
raise ParsingError('Only pooling layers with dimensions 1, 2, '
'or 3 are supported.')
for i in range(len(self.size)):
try:
self.size[i] = int(self.size[i])
except ValueError:
raise ParsingError('All "size" entries must evaluate to '
'integers. We received this instead: {}'
.format(self.size[i]))
if self.strides is not None:
if not isinstance(self.strides, (list, tuple)):
try:
self.strides = int(self.strides)
except ValueError:
raise ParsingError('"strides" must evaluate to an '
'integer or a list of integers.')
self.strides = [self.strides] * len(self.size)
else:
if len(self.strides) != len(self.size):
raise ParsingError('If "strides" is a list, it must '
'be the same length as "size".')
for i in range(len(self.strides)):
try:
self.strides[i] = int(self.strides[i])
except ValueError:
raise ParsingError('Each element of "strides" '
'must evaluate to an integer.')
else:
self.strides = [1] * len(self.size)
###########################################################################
def _build(self, model):
""" Instantiates the layer with the given backend.
"""
backend = model.get_backend()
if backend.get_name() == 'keras':
if backend.keras_version() == 1:
import keras.layers as L # pylint: disable=import-error
kwargs = {
'pool_size' : self.size,
'strides' : self.strides,
'border_mode' : 'valid',
'name' : self.name
}
if len(self.size) == 1:
kwargs['pool_length'] = kwargs.pop('pool_size')
kwargs['stride'] = kwargs.pop('strides')
else:
import keras.layers.pooling as L # pylint: disable=import-error
kwargs = {
'pool_size' : self.size,
'strides' : self.strides,
'padding' : 'valid',
'name' : self.name
}
if len(self.size) == 1:
kwargs['pool_size'] = kwargs.pop('pool_size')[0]
else:
kwargs['data_format'] = 'channels_last'
if self.pooltype == 'max':
func = {
1 : L.MaxPooling1D,
2 : L.MaxPooling2D,
3 : L.MaxPooling3D
}.get(len(self.size))
elif self.pooltype == 'average':
func = {
1 : L.AveragePooling1D,
2 : L.AveragePooling2D,
3 : L.AveragePooling3D
}.get(len(self.size))
else:
raise ValueError('Unhandled pool type "{}". This is a bug.',
self.pooltype)
if func is None:
raise ValueError('Invalid pool function for pool type "{}" '
'the supplied pool parameters. This is a bug.'
.format(self.pooltype))
yield func(**kwargs)
elif backend.get_name() == 'pytorch':
import torch.nn as nn # pylint: disable=import-error
from kur.backend.pytorch.modules import swap_channels
if self.pooltype == 'max':
func = {
1 : nn.MaxPool1d,
2 : nn.MaxPool2d,
3 : nn.MaxPool3d
}.get(len(self.size))
elif self.pooltype == 'average':
func = {
1 : nn.AvgPool1d,
2 : nn.AvgPool2d,
3 : nn.AvgPool3d
}.get(len(self.size))
else:
raise ValueError('Unhandled pool type "{}". This is a bug.',
self.pooltype)
def connect(inputs):
""" Connects the layers.
"""
assert len(inputs) == 1
output = model.data.add_operation(
swap_channels
)(inputs[0]['layer'])
output = model.data.add_layer(
self.name,
func(
self.size,
self.strides,
padding=0,
dilation=1,
ceil_mode=False
)
)(output)
output = model.data.add_operation(
swap_channels
)(output)
return {
'shape' : self.shape([inputs[0]['shape']]),
'layer' : output
}
yield connect
else:
raise ValueError(
'Unknown or unsupported backend: {}'.format(backend))
###########################################################################
def shape(self, input_shapes):
""" Returns the output shape of this layer for a given input shape.
"""
if len(input_shapes) > 1:
raise ValueError('Pooling layers only take a single input.')
input_shape = input_shapes[0]
if len(input_shape) != len(self.size) + 1:
raise ValueError('Invalid input shape to a pooling layer: {}'
.format(input_shape))
output_shape = tuple(
(input_shape[i] - self.size[i]) // self.strides[i] + 1
for i in range(len(self.size))
) + (input_shape[-1], )
return output_shape
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| 29.304 | 79 | 0.603877 |
f09e10f806231b6889a17c36cc8a29eeb4d2f761 | 19,827 | py | Python | opendr/test_renderer.py | yukihiko/hrm | 89bfb075d3c9ba91826c0c782ca6aff9507c663b | [
"MIT"
] | 1 | 2019-05-09T07:36:04.000Z | 2019-05-09T07:36:04.000Z | opendr/test_renderer.py | yukihiko/hrm | 89bfb075d3c9ba91826c0c782ca6aff9507c663b | [
"MIT"
] | null | null | null | opendr/test_renderer.py | yukihiko/hrm | 89bfb075d3c9ba91826c0c782ca6aff9507c663b | [
"MIT"
] | null | null | null | # #!/usr/bin/env python
# # encoding: utf-8
# """
# Author(s): Matthew Loper
#
# See LICENCE.txt for licensing and contact information.
# """
#
# import time
# import math
# import unittest
# import numpy as np
# import unittest
# import mathutils
# try:
# import matplotlib.pyplot as plt
# import matplotlib
# except:
# from dummy import dummy as plt
#
# from renderer import *
# from chumpy import Ch
# from chumpy.utils import row, col
# from lighting import *
# from util_tests import get_earthmesh, process
# from collections import OrderedDict
# import ipdb
#
#
# visualize = False
#
# def getcam():
# from opendr.camera import ProjectPoints
#
# w = 256
# h = 192
#
# f = np.array([200,200])
# rt = np.zeros(3)
# t = np.zeros(3)
# k = np.zeros(5)
# c = np.array([w/2., h/2.])
#
# if True:
# ratio = 640 / 256.
# f *= ratio
# c *= ratio
# w *= ratio
# h *= ratio
#
# pp = ProjectPoints(f=f, rt=rt, t=t, k=k, c=c)
# frustum = {'near': 1.0, 'far': 20.0, 'width': w, 'height': h}
#
# return pp, frustum
#
# class TestRenderer(unittest.TestCase):
#
# def load_basics(self):
# np.random.seed(0)
# camera, frustum = getcam()
# mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([0,0,0]))
# camera.v = mesh.v
# lighting_3channel = LambertianPointLight(
# f=mesh.f,
# num_verts=len(mesh.v),
# light_pos=np.array([-1000,-1000,-1000]),
# vc=mesh.vc,
# light_color=np.array([1., 1., 1.]))
# lighting_1channel = LambertianPointLight(
# f=mesh.f,
# num_verts=len(mesh.v),
# light_pos=np.array([-1000,-1000,-1000]),
# vc=mesh.vc.mean(axis=1).reshape((-1,1)),
# light_color=np.array([1.]))
#
# bgcolor = np.array([0.,0.,0.])
# ipdb.set_trace()
# cr = ColoredRenderer()
# cr.camera = camera
# cr.camera.openglMat = np.array(mathutils.Matrix.Rotation(radians(180), 4, 'X'))
# cr.frustum = frustum
# cr.set(v=mesh.v, f=mesh.f)
#
# renderers = [
#
# ColoredRenderer(v=mesh.v, f=mesh.f, camera=camera, frustum=frustum, bgcolor=bgcolor, num_channels=3)
# # TexturedRenderer(f=mesh.f, camera=camera, frustum=frustum, texture_image=mesh.texture_image, vt=mesh.vt, ft=mesh.ft, bgcolor=bgcolor),
# # ColoredRenderer(f=mesh.f, camera=camera, frustum=frustum, bgcolor=bgcolor[0], num_channels=1)]
# ]
#
#
# lightings = {1: lighting_1channel, 3: lighting_3channel}
# return mesh, lightings, camera, frustum, renderers
#
# def test_pyramids(self):
# """ Test that pyramid construction doesn't crash. No quality testing here. """
# mesh, lightings, camera, frustum, renderers = self.load_basics()
# from filters import gaussian_pyramid, laplacian_pyramid, GaussPyrDownOne
#
# camera.v = mesh.v
# for rn in renderers:
# lightings[rn.num_channels].v = camera.v
# rn.vc = lightings[rn.num_channels]
# rn_pyr = gaussian_pyramid(rn, normalization=None, n_levels=2)
# rn_lap = laplacian_pyramid(rn, normalization=None, imshape=rn.shape, as_list=False, n_levels=2)
# rn_gpr = GaussPyrDownOne(im_shape=rn.shape, want_downsampling=True, px=rn)
# for r in [rn_pyr, rn_lap, rn_gpr]:
# _ = r.r
#
# for r in [rn_pyr, rn_gpr]:
# for ii in range(3):
# rn.v[:,:] = rn.v[:,:].r + 1e-10
# import time
# tm = time.time()
# _ = r.dr_wrt(rn)
# #print "trial %d: %.2fS " % (ii, time.time() - tm)
#
# def test_distortion(self):
# mesh, lightings, camera, frustum, renderers = self.load_basics()
#
# renderer = renderers[1]
# lighting = lightings[renderer.num_channels]
# lighting.light_pos = -lighting.light_pos * 100.
#
# mesh = get_earthmesh(trans=np.array([0,0,-8]), rotation = np.array([math.pi/2.,0,0]))
# mesh_verts = Ch(mesh.v.flatten())
# renderer.camera = camera
# camera.v = mesh_verts
# lighting.v = mesh_verts
# renderer.vc = lighting
# renderer.camera = camera
#
# camera.rt = np.array([np.pi, 0, 0])
#
# # Get pixels and derivatives
# im_original = renderer.r.copy()
#
# #camera.k = np.zeros(5)
# #camera.k = np.arange(8,0,-1)*.1
# #camera.k = np.array([ 0.00249999, 0.42208098, 0.45360267, 0.06808415, -0.38003062])
# camera.k = np.array([ 5., 25., .3, .4, 1000., 5., 0., 0.])
# im_distorted = renderer.r
#
# cr = renderer
# cmtx = np.array([
# [cr.camera.f.r[0], 0, cr.camera.c.r[0]],
# [0, cr.camera.f.r[1], cr.camera.c.r[1]],
# [0, 0, 1]
# ])
#
# from cvwrap import cv2
# im_undistorted = cv2.undistort(im_distorted, cmtx, cr.camera.k.r)
#
# d1 = (im_original - im_distorted).ravel()
# d2 = (im_original - im_undistorted).ravel()
#
# d1 = d1[d1 != 0.]
# d2 = d2[d2 != 0.]
#
# self.assertGreater(np.mean(d1**2) / np.mean(d2**2), 44.)
# self.assertLess(np.mean(d2**2), 0.0016)
# self.assertGreater(np.median(d1**2) / np.median(d2**2), 650)
# self.assertLess(np.median(d2**2), 1.9e-5)
#
#
# if visualize:
# import matplotlib.pyplot as plt
# plt.ion()
#
# matplotlib.rcParams.update({'font.size': 18})
# plt.figure(figsize=(6*3, 2*3))
# plt.subplot(1,4,1)
# plt.imshow(im_original)
# plt.title('original')
#
# plt.subplot(1,4,2)
# plt.imshow(im_distorted)
# plt.title('distorted')
#
# plt.subplot(1,4,3)
# plt.imshow(im_undistorted)
# plt.title('undistorted by opencv')
#
# plt.subplot(1,4,4)
# plt.imshow(im_undistorted - im_original + .5)
# plt.title('diff')
#
# plt.draw()
# plt.show()
#
#
#
#
#
#
# def test_cam_derivatives(self):
# mesh, lightings, camera, frustum, renderers = self.load_basics()
#
# camparms = {
# 'c': {'mednz' : 2.2e-2, 'meannz': 4.2e-2, 'desc': 'center of proj diff', 'eps0': 4., 'eps1': .1},
# #'f': {'mednz' : 2.5e-2, 'meannz': 6e-2, 'desc': 'focal diff', 'eps0': 100., 'eps1': .1},
# 't': {'mednz' : 1.2e-1, 'meannz': 3.0e-1, 'desc': 'trans diff', 'eps0': .25, 'eps1': .1},
# 'rt': {'mednz' : 8e-2, 'meannz': 1.8e-1, 'desc': 'rot diff', 'eps0': 0.02, 'eps1': .5},
# 'k': {'mednz' : 7e-2, 'meannz': 5.1e-1, 'desc': 'distortion diff', 'eps0': .5, 'eps1': .05}
# }
#
# for renderer in renderers:
#
# im_shape = renderer.shape
# lighting = lightings[renderer.num_channels]
#
# # Render a rotating mesh
# mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))
# mesh_verts = Ch(mesh.v.flatten())
# camera.v = mesh_verts
# lighting.v = mesh_verts
# renderer.vc = lighting
# renderer.camera = camera
#
#
# for atrname, info in list(camparms.items()):
#
# # Get pixels and derivatives
# r = renderer.r
#
# atr = lambda : getattr(camera, atrname)
# satr = lambda x : setattr(camera, atrname, x)
#
# atr_size = atr().size
# dr = renderer.dr_wrt(atr())
#
# # Establish a random direction
# tmp = np.random.rand(atr().size) - .5
# direction = (tmp / np.linalg.norm(tmp))*info['eps0']
# #direction = np.sin(np.ones(atr_size))*info['eps0']
# #direction = np.zeros(atr_size)
# # try:
# # direction[4] = 1.
# # except: pass
# #direction *= info['eps0']
# eps = info['eps1']
#
# # Render going forward in that direction
# satr(atr().r + direction*eps/2.)
# rfwd = renderer.r
#
# # Render going backward in that direction
# satr(atr().r - direction*eps/1.)
# rbwd = renderer.r
#
# # Put back
# satr(atr().r + direction*eps/2.)
#
# # Establish empirical and predicted derivatives
# dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
# dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)
#
# images = OrderedDict()
# images['shifted %s' % (atrname,)] = np.asarray(rfwd, np.float64)-.5
# images[r'empirical %s' % (atrname,)] = dr_empirical
# images[r'predicted %s' % (atrname,)] = dr_predicted
# images[info['desc']] = dr_predicted - dr_empirical
#
# nonzero = images[info['desc']][np.nonzero(images[info['desc']]!=0)[0]]
#
# mederror = np.median(np.abs(nonzero))
# meanerror = np.mean(np.abs(nonzero))
# if visualize:
# matplotlib.rcParams.update({'font.size': 18})
# plt.figure(figsize=(6*3, 2*3))
# for idx, title in enumerate(images.keys()):
# plt.subplot(1,len(list(images.keys())), idx+1)
# im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
# plt.title(title)
# plt.imshow(im)
#
# print('%s: median nonzero %.2e' % (atrname, mederror,))
# print('%s: mean nonzero %.2e' % (atrname, meanerror,))
# plt.draw()
# plt.show()
#
# self.assertLess(meanerror, info['meannz'])
# self.assertLess(mederror, info['mednz'])
#
#
# def test_vert_derivatives(self):
#
# mesh, lightings, camera, frustum, renderers = self.load_basics()
#
# for renderer in renderers:
#
# lighting = lightings[renderer.num_channels]
# im_shape = renderer.shape
#
# # Render a rotating mesh
# mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))
# mesh_verts = Ch(mesh.v.flatten())
# camera.set(v=mesh_verts)
# lighting.set(v=mesh_verts)
# renderer.set(camera=camera)
# renderer.set(vc=lighting)
#
# # Get pixels and derivatives
# r = renderer.r
# dr = renderer.dr_wrt(mesh_verts)
#
# # Establish a random direction
# direction = (np.random.rand(mesh.v.size).reshape(mesh.v.shape)-.5)*.1 + np.sin(mesh.v*10)*.2
# direction *= .5
# eps = .2
#
# # Render going forward in that direction
# mesh_verts = Ch(mesh.v+direction*eps/2.)
# lighting.set(v=mesh_verts)
# renderer.set(v=mesh_verts, vc=lighting)
# rfwd = renderer.r
#
# # Render going backward in that direction
# mesh_verts = Ch(mesh.v-direction*eps/2.)
# lighting.set(v=mesh_verts)
# renderer.set(v=mesh_verts, vc=lighting)
# rbwd = renderer.r
#
# # Establish empirical and predicted derivatives
# dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
# dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)
#
# images = OrderedDict()
# images['shifted verts'] = np.asarray(rfwd, np.float64)-.5
# images[r'empirical verts $\left(\frac{dI}{dV}\right)$'] = dr_empirical
# images[r'predicted verts $\left(\frac{dI}{dV}\right)$'] = dr_predicted
# images['difference verts'] = dr_predicted - dr_empirical
#
# nonzero = images['difference verts'][np.nonzero(images['difference verts']!=0)[0]]
#
# if visualize:
# matplotlib.rcParams.update({'font.size': 18})
# plt.figure(figsize=(6*3, 2*3))
# for idx, title in enumerate(images.keys()):
# plt.subplot(1,len(list(images.keys())), idx+1)
# im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
# plt.title(title)
# plt.imshow(im)
#
# print('verts: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
# print('verts: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
# plt.draw()
# plt.show()
#
# self.assertLess(np.mean(np.abs(nonzero)), 7e-2)
# self.assertLess(np.median(np.abs(nonzero)), 4e-2)
#
#
# def test_lightpos_derivatives(self):
#
# mesh, lightings, camera, frustum, renderers = self.load_basics()
#
#
# for renderer in renderers:
#
# im_shape = renderer.shape
# lighting = lightings[renderer.num_channels]
#
# # Render a rotating mesh
# mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))
# mesh_verts = Ch(mesh.v.flatten())
# camera.set(v=mesh_verts)
#
#
# # Get predicted derivatives wrt light pos
# light1_pos = Ch(np.array([-1000,-1000,-1000]))
# lighting.set(light_pos=light1_pos, v=mesh_verts)
# renderer.set(vc=lighting, v=mesh_verts)
#
# dr = renderer.dr_wrt(light1_pos).copy()
#
# # Establish a random direction for the light
# direction = (np.random.rand(3)-.5)*1000.
# eps = 1.
#
# # Find empirical forward derivatives in that direction
# lighting.set(light_pos = light1_pos.r + direction*eps/2.)
# renderer.set(vc=lighting)
# rfwd = renderer.r
#
# # Find empirical backward derivatives in that direction
# lighting.set(light_pos = light1_pos.r - direction*eps/2.)
# renderer.set(vc=lighting)
# rbwd = renderer.r
#
# # Establish empirical and predicted derivatives
# dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
# dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)
#
# images = OrderedDict()
# images['shifted lightpos'] = np.asarray(rfwd, np.float64)-.5
# images[r'empirical lightpos $\left(\frac{dI}{dL_p}\right)$'] = dr_empirical
# images[r'predicted lightpos $\left(\frac{dI}{dL_p}\right)$'] = dr_predicted
# images['difference lightpos'] = dr_predicted-dr_empirical
#
# nonzero = images['difference lightpos'][np.nonzero(images['difference lightpos']!=0)[0]]
#
# if visualize:
# matplotlib.rcParams.update({'font.size': 18})
# plt.figure(figsize=(6*3, 2*3))
# for idx, title in enumerate(images.keys()):
# plt.subplot(1,len(list(images.keys())), idx+1)
# im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
# plt.title(title)
# plt.imshow(im)
#
# plt.show()
# print('lightpos: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
# print('lightpos: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
# self.assertLess(np.mean(np.abs(nonzero)), 2.4e-2)
# self.assertLess(np.median(np.abs(nonzero)), 1.2e-2)
#
#
#
# def test_color_derivatives(self):
#
# mesh, lightings, camera, frustum, renderers = self.load_basics()
#
# for renderer in renderers:
#
# im_shape = renderer.shape
# lighting = lightings[renderer.num_channels]
#
# # Get pixels and dI/dC
# mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))
# mesh_verts = Ch(mesh.v)
# mesh_colors = Ch(mesh.vc)
#
# camera.set(v=mesh_verts)
#
# # import pdb; pdb.set_trace()
# # print '-------------------------------------------'
# #lighting.set(vc=mesh_colors, v=mesh_verts)
#
# try:
# lighting.vc = mesh_colors[:,:renderer.num_channels]
# except:
# import pdb; pdb.set_trace()
# lighting.v = mesh_verts
#
# renderer.set(v=mesh_verts, vc=lighting)
#
# r = renderer.r
# dr = renderer.dr_wrt(mesh_colors).copy()
#
# # Establish a random direction
# eps = .4
# direction = (np.random.randn(mesh.v.size).reshape(mesh.v.shape)*.1 + np.sin(mesh.v*19)*.1).flatten()
#
# # Find empirical forward derivatives in that direction
# mesh_colors = Ch(mesh.vc+direction.reshape(mesh.vc.shape)*eps/2.)
# lighting.set(vc=mesh_colors[:,:renderer.num_channels])
# renderer.set(vc=lighting)
# rfwd = renderer.r
#
# # Find empirical backward derivatives in that direction
# mesh_colors = Ch(mesh.vc-direction.reshape(mesh.vc.shape)*eps/2.)
# lighting.set(vc=mesh_colors[:,:renderer.num_channels])
# renderer.set(vc=lighting)
# rbwd = renderer.r
#
# dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
#
# try:
# dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)
# except:
# import pdb; pdb.set_trace()
#
# images = OrderedDict()
# images['shifted colors'] = np.asarray(rfwd, np.float64)-.5
# images[r'empirical colors $\left(\frac{dI}{dC}\right)$'] = dr_empirical
# images[r'predicted colors $\left(\frac{dI}{dC}\right)$'] = dr_predicted
# images['difference colors'] = dr_predicted-dr_empirical
#
# nonzero = images['difference colors'][np.nonzero(images['difference colors']!=0)[0]]
#
# if visualize:
# matplotlib.rcParams.update({'font.size': 18})
# plt.figure(figsize=(6*3, 2*3))
# for idx, title in enumerate(images.keys()):
# plt.subplot(1,len(list(images.keys())), idx+1)
# im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
# plt.title(title)
# plt.imshow(im)
#
# plt.show()
# print('color: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
# print('color: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
# self.assertLess(np.mean(np.abs(nonzero)), 2e-2)
# self.assertLess(np.median(np.abs(nonzero)), 4.5e-3)
#
#
#
# def plt_imshow(im):
# #im = process(im, vmin, vmax)
# result = plt.imshow(im)
# plt.axis('off')
# plt.subplots_adjust(bottom=0.01, top=.99, left=0.01, right=.99)
# return result
#
#
# if __name__ == '__main__':
# plt.ion()
# visualize = True
# #unittest.main()
# suite = unittest.TestLoader().loadTestsFromTestCase(TestRenderer)
# unittest.TextTestRunner(verbosity=2).run(suite)
# plt.show()
# import pdb; pdb.set_trace()
#
| 38.424419 | 150 | 0.526151 |
eaf5e756e0c953776bf102676657f2ed5bca449a | 1,534 | py | Python | discord/types/template.py | jeromedontdev/discord.py | 42bab370a73440fa8af2380211ad92ccb6bf7f46 | [
"MIT"
] | 13 | 2020-12-16T06:13:11.000Z | 2021-04-15T12:01:38.000Z | discord/types/template.py | penguenn/discord.py | fb024546ffd12cda771bb58a762b2ebc824f0299 | [
"MIT"
] | 1 | 2021-05-23T16:08:10.000Z | 2021-05-23T16:08:10.000Z | discord/types/template.py | penguenn/discord.py | fb024546ffd12cda771bb58a762b2ebc824f0299 | [
"MIT"
] | 6 | 2020-12-16T00:01:24.000Z | 2021-02-05T12:32:54.000Z | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TypedDict
from .snowflake import Snowflake
from .user import User
from .guild import Guild
class Template(TypedDict):
code: str
name: str
description: Optional[str]
usage_count: int
creator_id: Snowflake
creator: User
created_at: str
updated_at: str
source_guild_id: Snowflake
serialized_source_guild: Guild
is_dirty: Optional[bool]
| 34.088889 | 75 | 0.781617 |
323b7c09b3fbebefb973b8f765645f81383e55a0 | 473 | py | Python | libmysql_utils/libmysql_utils/test.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | 1 | 2021-07-12T11:20:58.000Z | 2021-07-12T11:20:58.000Z | libmysql_utils/libmysql_utils/test.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | libmysql_utils/libmysql_utils/test.py | FrederichRiver/neutrino3 | c16c6ea824999c012252d0e281473a6ab13fd38e | [
"BSD-3-Clause"
] | null | null | null | import unittest
from libmysql_utils.mysql8 import mysqlBase
from libmysql_utils.header import GLOBAL_HEADER, _IP
class TestMysql(unittest.TestCase):
def setUp(self) -> None:
return super().setUp()
def tearDown(self) -> None:
return super().tearDown()
def testConnection(self):
mysql = mysqlBase(GLOBAL_HEADER)
print(mysql._version())
def testIP(self):
print(_IP)
if __name__ == '__main__':
unittest.main()
| 20.565217 | 52 | 0.668076 |
21d10d8a453b4c243f8f4f9a60ddd3d8c589c213 | 5,865 | py | Python | freenet/handler/tundev.py | alexliyu/fdslight | d54c718dde3bb8b2ccea1e091fa24c31dad13265 | [
"BSD-2-Clause"
] | null | null | null | freenet/handler/tundev.py | alexliyu/fdslight | d54c718dde3bb8b2ccea1e091fa24c31dad13265 | [
"BSD-2-Clause"
] | null | null | null | freenet/handler/tundev.py | alexliyu/fdslight | d54c718dde3bb8b2ccea1e091fa24c31dad13265 | [
"BSD-2-Clause"
] | 1 | 2019-06-22T23:25:56.000Z | 2019-06-22T23:25:56.000Z | #!/usr/bin/env python3
import pywind.evtframework.excepts as excepts
import os, sys, socket
import pywind.evtframework.handler.handler as handler
import freenet.lib.fn_utils as fn_utils
import pywind.lib.timer as timer
try:
import fcntl
except ImportError:
pass
class tun_base(handler.handler):
__creator_fd = None
# 要写入到tun的IP包
___ip_packets_for_write = []
# 写入tun设备的最大IP数据包的个数
__MAX_WRITE_QUEUE_SIZE = 20
# 当前需要写入tun设备的IP数据包的个数
__current_write_queue_n = 0
__BLOCK_SIZE = 16 * 1024
def __create_tun_dev(self, name):
"""创建tun 设备
:param name:
:return fd:
"""
tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI)
fn_utils.interface_up(name)
if tun_fd < 0:
raise SystemError("can not create tun device,please check your root")
return tun_fd
@property
def creator(self):
return self.__creator_fd
def init_func(self, creator_fd, tun_dev_name, *args, **kwargs):
"""
:param creator_fd:
:param tun_dev_name:tun 设备名称
:param subnet:如果是服务端则需要则个参数
"""
tun_fd = self.__create_tun_dev(tun_dev_name)
if tun_fd < 3:
print("error:create tun device failed:%s" % tun_dev_name)
sys.exit(-1)
self.__creator_fd = creator_fd
self.set_fileno(tun_fd)
fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK)
self.dev_init(tun_dev_name, *args, **kwargs)
return tun_fd
def dev_init(self, dev_name, *args, **kwargs):
pass
def evt_read(self):
for i in range(5):
try:
ip_packet = os.read(self.fileno, self.__BLOCK_SIZE)
except BlockingIOError:
return
self.handle_ip_packet_from_read(ip_packet)
return
def evt_write(self):
try:
ip_packet = self.___ip_packets_for_write.pop(0)
except IndexError:
self.remove_evt_write(self.fileno)
return
self.__current_write_queue_n -= 1
try:
os.write(self.fileno, ip_packet)
except BlockingIOError:
self.__current_write_queue_n += 1
self.___ip_packets_for_write.insert(0, ip_packet)
return
''''''
def handle_ip_packet_from_read(self, ip_packet):
"""处理读取过来的IP包,重写这个方法
:param ip_packet:
:return None:
"""
pass
def handle_ip_packet_for_write(self, ip_packet):
"""处理要写入的IP包,重写这个方法
:param ip_packet:
:return new_ip_packet:
"""
pass
def error(self):
self.dev_error()
def dev_error(self):
"""重写这个方法
:return:
"""
pass
def timeout(self):
self.dev_timeout()
def dev_timeout(self):
"""重写这个方法
:return:
"""
pass
def delete(self):
self.dev_delete()
def dev_delete(self):
"""重写这个方法
:return:
"""
pass
def add_to_sent_queue(self, ip_packet):
# 丢到超出规定的数据包,防止内存过度消耗
if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE:
return
self.__current_write_queue_n += 1
n_ip_message = self.handle_ip_packet_for_write(ip_packet)
if not n_ip_message:
return
self.___ip_packets_for_write.append(n_ip_message)
class tuns(tun_base):
"""服务端的tun数据处理
"""
__map = None
__timer = None
__MAP_TIMEOUT = 600
__TIMEOUT = 10
def __add_route(self, dev_name, subnet):
"""给设备添加路由
:param dev_name:
:param subnet:
:return:
"""
ip, mask_size = subnet
mask = 0
for n in range(mask_size):
mask |= 1 << (31 - n)
t = socket.inet_aton(ip)
i_ip = (t[0] << 24) | (t[1] << 16) | (t[2] << 8) | t[3]
if i_ip & mask != (i_ip):
print("error:netmask doesn't match route address")
sys.exit(-1)
cmd = "route add -net %s/%s dev %s" % (ip, mask_size, dev_name)
os.system(cmd)
def dev_init(self, tun_devname, subnet):
self.register(self.fileno)
self.add_evt_read(self.fileno)
self.__add_route(tun_devname, subnet)
self.__timer = timer.timer()
self.__map = {}
def dev_error(self):
print("error:server tun device error")
self.delete_handler(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
ip_ver = (ip_packet[0] & 0xf0) >> 4
if ip_ver != 4: return
protocol = ip_packet[9]
if protocol not in (1, 6, 17, 132,): return
daddr = ip_packet[16:20]
if daddr not in self.__map: return
fd = self.__map[daddr]
try:
self.send_message_to_handler(self.fileno, fd, ip_packet)
self.__timer.set_timeout(daddr, self.__MAP_TIMEOUT)
except excepts.HandlerNotFoundErr:
return
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
sys.exit(-1)
def message_from_handler(self, from_fd, ip_packet):
ip_ver = (ip_packet[0] & 0xf0) >> 4
if ip_ver != 4: return
saddr = ip_packet[12:16]
if saddr not in self.__map: self.__map[saddr] = from_fd
self.__timer.set_timeout(saddr, self.__MAP_TIMEOUT)
self.add_evt_write(self.fileno)
self.add_to_sent_queue(ip_packet)
def dev_timeout(self):
names = self.__timer.get_timeout_names()
for name in names:
if name in self.__map: del self.__map[name]
if self.__timer.exists(name): self.__timer.drop(name)
self.set_timeout(self.fileno, self.__TIMEOUT)
| 25.951327 | 84 | 0.593009 |
e1234eed0976578dc9d65e4bc92208eacb85596f | 2,548 | py | Python | tests/models/validators/v2_1_2/jsd_9480fa1f47ca9254.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 32 | 2019-09-05T05:16:56.000Z | 2022-03-22T09:50:38.000Z | tests/models/validators/v2_1_2/jsd_9480fa1f47ca9254.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 35 | 2019-09-07T18:58:54.000Z | 2022-03-24T19:29:36.000Z | tests/models/validators/v2_1_2/jsd_9480fa1f47ca9254.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 18 | 2019-09-09T11:07:21.000Z | 2022-03-25T08:49:59.000Z | # -*- coding: utf-8 -*-
"""Cisco DNA Center Update Project data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator9480Fa1F47Ca9254(object):
"""Update Project request schema definition."""
def __init__(self):
super(JSONSchemaValidator9480Fa1F47Ca9254, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"response": {
"properties": {
"taskId": {},
"url": {
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"version": {
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| 31.85 | 78 | 0.598509 |
1c2d60629e50286a1c1d72d99a70b5b0dfbf9049 | 9,690 | py | Python | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/37_text.learner.ipynb (unless otherwise specified).
__all__ = ['match_embeds', 'load_ignore_keys', 'TextLearner', 'decode_spec_tokens', 'LMLearner',
'language_model_learner', 'text_classifier_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
# Cell
def match_embeds(old_wgts, old_vocab, new_vocab):
"Convert the embedding in `wgts` to go with a new vocabulary."
bias, wgts = old_wgts.get('1.decoder.bias', None), old_wgts['0.encoder.weight']
wgts_m = wgts.mean(0)
new_wgts = wgts.new_zeros((len(new_vocab),wgts.size(1)))
if bias is not None:
bias_m = bias.mean(0)
new_bias = bias.new_zeros((len(new_vocab),))
old_o2i = old_vocab.o2i if hasattr(old_vocab, 'o2i') else {w:i for i,w in enumerate(old_vocab)}
for i,w in enumerate(new_vocab):
idx = old_o2i.get(w, -1)
new_wgts[i] = wgts[idx] if idx>=0 else wgts_m
if bias is not None: new_bias[i] = bias[idx] if idx>=0 else bias_m
old_wgts['0.encoder.weight'] = new_wgts
if '0.encoder_dp.emb.weight' in old_wgts: old_wgts['0.encoder_dp.emb.weight'] = new_wgts.clone()
old_wgts['1.decoder.weight'] = new_wgts.clone()
if bias is not None: old_wgts['1.decoder.bias'] = new_bias
return old_wgts
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
def load_ignore_keys(model, wgts):
"Load `wgts` in `model` ignoring the names of the keys, just taking parameters in order"
sd = model.state_dict()
for k1,k2 in zip(sd.keys(), wgts.keys()): sd[k1].data = wgts[k2].data.clone()
return model.load_state_dict(sd)
# Cell
@delegates(Learner.__init__)
class TextLearner(Learner):
"Basic class for a `Learner` in NLP."
def __init__(self, model, dls, alpha=2., beta=1., moms=(0.8,0.7,0.8), **kwargs):
super().__init__(model, dls, moms=moms, **kwargs)
self.add_cbs([ModelReseter(), RNNRegularizer(alpha=alpha, beta=beta)])
def save_encoder(self, file):
"Save the encoder to `self.path/self.model_dir/file`"
if rank_distrib(): return # don't save if slave proc
encoder = get_model(self.model)[0]
if hasattr(encoder, 'module'): encoder = encoder.module
torch.save(encoder.state_dict(), join_path_file(file,self.path/self.model_dir, ext='.pth'))
def load_encoder(self, file, device=None):
"Load the encoder `name` from the model directory."
encoder = get_model(self.model)[0]
if device is None: device = self.dls.device
if hasattr(encoder, 'module'): encoder = encoder.module
distrib_barrier()
encoder.load_state_dict(torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device))
self.freeze()
return self
def load_pretrained(self, wgts_fname, vocab_fname, model=None):
"Load a pretrained model and adapt it to the data vocabulary."
old_vocab = Path(vocab_fname).load()
new_vocab = _get_text_vocab(self.dls)
wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
wgts = match_embeds(wgts, old_vocab, new_vocab)
load_ignore_keys(self.model if model is None else model, wgts)
self.freeze()
return self
# Cell
def decode_spec_tokens(tokens):
new_toks,rule,arg = [],None,None
for t in tokens:
if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t
elif rule is None: new_toks.append(t)
elif rule == TK_MAJ:
new_toks.append(t[:1].upper() + t[1:].lower())
rule = None
elif rule == TK_UP:
new_toks.append(t.upper())
rule = None
elif arg is None:
try: arg = int(t)
except: rule = None
else:
if rule == TK_REP: new_toks.append(t * arg)
else: new_toks += [t] * arg
return new_toks
# Cell
class LMLearner(TextLearner):
"Add functionality to `TextLearner` when dealingwith a language model"
@delegates(tokenize1)
def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, rm_type_tfms=0, no_bar=False,
decoder=decode_spec_tokens, **kwargs):
"Return `text` and the `n_words` that come after"
self.model.reset()
tokens = tokenize1(text, **kwargs)
tfm = self.dls.train_ds.numericalize
idxs = tfm(tokens).to(self.dls.device)
if no_unk: unk_idx = self.dls.vocab.index(UNK)
for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
res = preds[0][-1]
if no_unk: res[unk_idx] = 0.
if min_p is not None:
if (res >= min_p).float().sum() == 0:
warn(f"There is no item with probability >= {min_p}, try a lower value.")
else: res[res < min_p] = 0.
if temperature != 1.: res.pow_(1 / temperature)
idx = torch.multinomial(res, 1).item()
idxs = torch.cat([idxs, idxs.new([idx])])
tokens = [tfm.vocab[i] for i in idxs if tfm.vocab[i] not in [BOS, PAD]]
sep = self.dls.train_ds.tokenizer
return sep.join(decoder(tokens))
@delegates(Learner.get_preds)
def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# Cell
from .models.core import _model_meta
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., pretrained=True, pretrained_fnames=None, **kwargs):
"Create a `Learner` with a language model from `data` and `arch`."
vocab = _get_text_vocab(dls)
model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
meta = _model_meta[arch]
learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
#TODO: add backard
#url = 'url_bwd' if data.backwards else 'url'
if pretrained or pretrained_fnames:
if pretrained_fnames is not None:
fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
else:
if 'url' not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta['url'] , c_key='model')
fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
learn = learn.load_pretrained(*fnames)
return learn
# Cell
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, pretrained=True, drop_mult=0.5, n_out=None,
lin_ftrs=None, ps=None, max_len=72*20, **kwargs):
"Create a `Learner` with a text classifier from `data` and `arch`."
vocab = _get_text_vocab(dls)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be infered from data, set `dls.c` or pass `n_out`"
model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config,
drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
meta = _model_meta[arch]
learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
if pretrained:
if 'url' not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta['url'], c_key='model')
fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
learn = learn.load_pretrained(*fnames, model=learn.model[0])
learn.freeze()
return learn
# Cell
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
for i,l in enumerate(['input', 'target']):
ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
rows = get_empty_df(len(samples))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
for i,l in enumerate(['input', 'target']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
for i,l in enumerate(['predicted', 'probability', 'loss']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows)) | 44.861111 | 123 | 0.642724 |
fedba58fbaab0ade3bd56e2021d6fe63c44e15fe | 6,894 | py | Python | nestimer/nestimer.py | tkhs3/nestimer | c0cb5793cf52e7513dd5dcc1882e6155ceb8d408 | [
"MIT"
] | null | null | null | nestimer/nestimer.py | tkhs3/nestimer | c0cb5793cf52e7513dd5dcc1882e6155ceb8d408 | [
"MIT"
] | null | null | null | nestimer/nestimer.py | tkhs3/nestimer | c0cb5793cf52e7513dd5dcc1882e6155ceb8d408 | [
"MIT"
] | null | null | null | import statistics
import timeit
import re
class timer(object):
dict_duration = {}
path_block_current = ""
flg_suspend = False
def __init__(self,
name,
n_digit=3,
flg_share_path_block=True):
"""record execution time for code block
args
name
str
code block name for identifying the record.
duration is recorded as a dict of list.
the key is block name, which is sequentially added to outer block name.
duration is recorded on exit.
n_digit
int
rounding point for the duration.
flg_share_path_block
bool
whether or not use shared block path as entry name between instances
its useful to track time in nested structure.
take care when use both shared block path and multi threading.
it may fail to track nested structure.
"""
self.name_block = name
self.n_digit = n_digit
self.flg_share_path_block = flg_share_path_block
def enter_block(self):
"""initialize for recording code block
"""
# set start time
self.time_start = timeit.default_timer()
self.time_prev = timeit.default_timer()
# add block name to shared current block path
self.path_block_outer = timer.path_block_current
if self.flg_share_path_block:
timer.path_block_current = "/".join(
[timer.path_block_current, self.name_block]
)
def __enter__(self):
self.enter_block()
return self
def _record_time(self,
name_entry=[],
time_prev=timeit.default_timer() ):
if timer.flg_suspend:
return
# use shared path
if self.flg_share_path_block:
path_entry = "/".join(
[timer.path_block_current] + name_entry
)
else:
path_entry = "/".join(
[self.name_block] + name_entry
)
duration = timeit.default_timer() - time_prev
timer.dict_duration.setdefault(path_entry, []).append(
round(duration, self.n_digit)
)
def record_time(self, name_entry):
"""record elapsed time within the block
args
name_entry
str
entry name for identifying the record.
duration is recorded as dict of list.
the key is current block path + entry name.
duration is calculated from the previouse recorded point.
otherwise, beginning of the block.
"""
self._record_time(name_entry=[name_entry], time_prev=self.time_prev)
self.time_prev = timeit.default_timer()
def exit_block(self):
"""finalize for recording code block
"""
self._record_time(time_prev=self.time_start)
# reset current block path
timer.path_block_current = self.path_block_outer
def __exit__(self, type, value, traceback):
self.exit_block()
@staticmethod
def get_stats(n_digit=3, filter="", dict_func_stats=None):
"""summarize recorded entries
args
n_digit
int
rounding point for the statistics.
filter
str
filter entries.
you can use regular expressions.
dict_func_stats
dict
summarize entries using arbitrary functions in addition to sum and mean.
"""
_re = re.compile(filter)
dict_mean = {
k : round(statistics.mean(v), n_digit)
for k, v in timer.dict_duration.items()
if _re.search(k)
}
dict_sum = {
k : round(sum(v), n_digit)
for k, v in timer.dict_duration.items()
if _re.search(k)
}
dict_stats = {"mean":dict_mean, "sum":dict_sum}
if dict_func_stats is not None:
for name, func in dict_func_stats.items():
dict_any_stats = {
k : round(func(v), n_digit)
for k, v in timer.dict_duration.items()
if _re.search(k)
}
dict_stats[name] = dict_any_stats
return dict_stats
def capture(name=None,
n_digit=3,
flg_share_path_block=True,
name_obj_timer=None
):
"""record execution time for functions,
please use as a decorator
args
name
str
code block name for identifying the record.
duration is recorded as a dict of list.
the key is block name, which is sequentially added to outer block name.
duration is recorded on exit.
n_digit
int
rounding point for the duration.
flg_share_path_block
bool
whether or not use shared block path as entry name between instances
its useful to track time in nested structure.
take care when use both shared block path and multi threading.
it may fail to track nested structure.
name_obj_timer
str
symbol name for referencing nestimer.timer instance inside the function.
"""
def _capture(function, *args, **kwargs):
def _func(*args, **kwargs):
# set block name
name_func = function.__name__
if name is None:
name_block = name_func
else:
name_block = name
# start recording
with timer(name=name_block, n_digit=n_digit, flg_share_path_block=flg_share_path_block) as t :
# execute functions
if name_obj_timer is None:
result = function(*args, **kwargs)
else:
_dict = {name_obj_timer : t}
kwargs.setdefault(name_obj_timer, None)
kwargs.pop(name_obj_timer)
result = function(*args, **kwargs, **_dict)
return result
return _func
return _capture
| 34.643216 | 107 | 0.507543 |
fe59f627c054cd60740ebbe68294e73b2777c721 | 4,653 | py | Python | week8/collaborative_filter.py | Emmayyyyy/dso-560-nlp-and-text-analytics | 76bde7d0ed7e760b5de455251a523e92a10116fd | [
"MIT"
] | 19 | 2019-03-06T02:34:41.000Z | 2021-12-28T23:06:57.000Z | week8/collaborative_filter.py | Emmayyyyy/dso-560-nlp-and-text-analytics | 76bde7d0ed7e760b5de455251a523e92a10116fd | [
"MIT"
] | null | null | null | week8/collaborative_filter.py | Emmayyyyy/dso-560-nlp-and-text-analytics | 76bde7d0ed7e760b5de455251a523e92a10116fd | [
"MIT"
] | 54 | 2020-03-10T06:43:24.000Z | 2022-03-22T22:20:28.000Z | import pandas as pd
from pyspark.mllib.recommendation import ALS, Rating
from pyspark.sql import SparkSession, SQLContext
from sklearn.metrics.pairwise import cosine_similarity
if __name__ == "__main__": # run this by typing "python collaborative_filter.py"
app_name = "collab_filter_example"
# create a Spark context
spark = SparkSession.builder.master("local").appName(app_name).getOrCreate()
# create a Spark SQL context to allow us to run SQL commands
sql_context = SQLContext(spark.sparkContext)
df = spark.read.csv("ratings_small.csv", header=True, sep=",")
print(df) # DataFrame[userId: string, movieId: string, rating: string, timestamp: string]
df.createOrReplaceTempView("ratings")
print(df.count())
# we'll filter out ratings from users with less than 10 ratings, and from films with less than 20 ratings
df = sql_context.sql("SELECT * "
"FROM ratings "
"WHERE userID IN (SELECT userID "
"FROM ratings GROUP BY userID HAVING COUNT(*) >= 10) "
"AND movieID IN (SELECT movieID FROM ratings GROUP BY movieID "
"HAVING COUNT(*) >= 20)")
# load in the movies names
movies = spark.read.csv("movies.csv", header=True, sep=",")
print(movies) # DataFrame[userId: string, movieId: string, rating: string]
movies.createOrReplaceTempView("movies") # create a SQL table called movies
# the Alternating Least Squares model that PySpark uses requires you to have a Rating object for each
# row, with the user ID, product ID, and rating as the three columns. So I am mapping the dataframe from a
# regular dataframe into an RDD (resilient distributed dataset) of Ratings objects
ratings = df.rdd.map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2])))
print(ratings) # PythonRDD[13] at RDD at PythonRDD.scala:53
# at this point, conceptually, we have a matrix that is U x P. U is the # of distinct users. P is the # of distinct
# products, or films, in this case.
rank = 10 # this is the number of dimensions D we want to reduce down to
numIterations = 15
# In collaborative filter, there is a U x P original matrix that is made up of two smaller
# U x D and D x P matrices. The D represents the number of reduced dimensions - in this case 20 (the rank variable).
# Also remember that the original U x P matrix has lots of missing values in it, since most users have not
# watched/rated most films. The ALS model will iteratively try to update the values in the U x D and D x P matrices
# until they match as close to possible the known values in the U x P (the original user-product ratings matrix).
model = ALS.train(ratings, rank, numIterations)
# Now that the model has finished, we have two new completely updated matrices: U x D and D x P. We care about the
# D x P matrix. This basically represents out reduced dimensions for each product (film). For each film, we get this
# vector. It will be of size 20.
# get the film features (each row will be a tuple - (film_id, array of features representing film size 20)
film_features = model.productFeatures()
spark.createDataFrame(film_features) \
.toDF("film_id", "features") \
.createOrReplaceTempView("film_features") # from the film_features, create a sql table called product_features
pandas_film_features_df = sql_context.sql("SELECT m.original_title as film, ff.features "
"FROM film_features ff "
"JOIN movies m ON m.id = ff.film_id").toPandas()
print(pandas_film_features_df) # now it's just another normal pandas dataframe, with a film column
# that contains the filmn name, a column with the film ID, and another column with numpy arrays representing the
# reduced dimensional vector that represents a film
film_names = list(pandas_film_features_df["film"].values)
film_similarities = pd.DataFrame(cosine_similarity(list(pandas_film_features_df["features"].values)), index=film_names,
columns=film_names).transpose()
similarities_df = film_similarities.unstack().reset_index()
similarities_df.columns = ["film1", "film2", "similarity"]
similarities_df = similarities_df[similarities_df["similarity"] < 0.99999999]
similarities_df = similarities_df[similarities_df["similarity"] >= 0.50]
similarities_df.sort_values(by="similarity", ascending=False, inplace=True)
similarities_df.to_csv("similarities.csv")
| 56.743902 | 123 | 0.691382 |
537b3d95a223498fd6b5aee18aec3959bab885be | 807 | py | Python | test/backup_transform_mysqlctld.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 8 | 2017-08-14T15:19:04.000Z | 2021-06-07T10:36:52.000Z | test/backup_transform_mysqlctld.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 19 | 2020-09-25T15:41:41.000Z | 2022-03-25T23:06:54.000Z | test/backup_transform_mysqlctld.py | paralin/vitess | 7b048c5442679ce6cf48773cf17a184c1ce91295 | [
"Apache-2.0"
] | 7 | 2021-03-07T03:24:39.000Z | 2022-02-16T06:46:10.000Z | #!/usr/bin/env python
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-runs backup_transform.py with use_mysqlctld=True."""
import backup_transform
import utils
if __name__ == '__main__':
backup_transform.use_mysqlctld = True
utils.main(backup_transform)
| 31.038462 | 74 | 0.760843 |
515e0b720dc88bf0d4f68ee6b19b10cbc8c9e3af | 635 | py | Python | src/old/main.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | null | null | null | src/old/main.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | null | null | null | src/old/main.py | plug-obp/plug-remote-python | 9b57989e3536b34fbbd7d6cafbc674ff6f4686eb | [
"MIT"
] | 1 | 2020-01-28T13:44:52.000Z | 2020-01-28T13:44:52.000Z | "Imports"
import sys
import getopt
from old import remote
def main(argv, model):
"Main function for remote"
## Read arguments
port = "1238"
try:
opts, _ = getopt.getopt(argv, "hp:", ["port="])
except getopt.GetoptError:
print ( 'remote.py -p port' )
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print ( 'remote.py -p port' )
sys.exit()
elif opt in ("-p", "--port"):
port = arg
print ( "Starting model on port " + port )
#################
## Start model ##
#################
remote.run(int(port), model)
| 19.84375 | 55 | 0.488189 |
b49a95fb3ea53d62f96febe3ce242b422118e3fb | 9,486 | py | Python | ckan/lib/jobs.py | singularita/ckan | 81d9e44b71f9ed6ec607cb0eb966265a2c524b90 | [
"Apache-2.0"
] | 1 | 2018-01-09T18:34:13.000Z | 2018-01-09T18:34:13.000Z | ckan/lib/jobs.py | singularita/ckan | 81d9e44b71f9ed6ec607cb0eb966265a2c524b90 | [
"Apache-2.0"
] | 4 | 2018-07-25T07:09:35.000Z | 2019-01-23T07:21:23.000Z | ckan/lib/jobs.py | Pilchards/ckan | 729480f82345df1e2d753c94c5e0541a2aff9bd8 | [
"Apache-2.0"
] | 1 | 2017-11-03T14:55:25.000Z | 2017-11-03T14:55:25.000Z | #!/usr/bin/env python
# encoding: utf-8
u'''
Asynchronous background jobs.
Note that most job management functions are not available from this
module but via the various ``job_*`` API functions.
Internally, RQ queue names are prefixed with a string containing the
CKAN site ID to avoid key collisions when the same Redis database is
used for multiple CKAN instances. The functions of this module expect
unprefixed queue names (e.g. ``'default'``) unless noted otherwise. The
raw RQ objects (e.g. a queue returned by ``get_queue``) use the full,
prefixed names. Use the functions ``add_queue_name_prefix`` and
``remove_queue_name_prefix`` to manage queue name prefixes.
.. versionadded:: 2.7
'''
import logging
import rq
from rq.connections import push_connection
from rq.exceptions import NoSuchJobError
from rq.job import Job
from rq.utils import ensure_list
from ckan.lib.redis import connect_to_redis
from ckan.common import config
from ckan.config.environment import load_environment
from ckan.model import meta
# Module-level logger.
log = logging.getLogger(__name__)

# Queue name used when callers do not specify one explicitly.
DEFAULT_QUEUE_NAME = u'default'

# RQ job queues, keyed by their full (prefixed) name.
# Do not use this directly, use ``get_queue`` instead.
_queues = {}
def _connect():
    u'''
    Open the Redis connection and register it with RQ.

    This works around https://github.com/nvie/rq/issues/479.
    '''
    redis_conn = connect_to_redis()
    push_connection(redis_conn)
    return redis_conn
def _get_queue_name_prefix():
    u'''
    Compute the site-specific queue name prefix.
    '''
    # Evaluated lazily because the CKAN config is only loaded at runtime.
    return u'ckan:%s:' % config[u'ckan.site_id']
def add_queue_name_prefix(name):
    u'''
    Return ``name`` with the site-specific queue prefix prepended.

    .. seealso:: :py:func:`remove_queue_name_prefix`
    '''
    prefix = _get_queue_name_prefix()
    return prefix + name
def remove_queue_name_prefix(name):
    u'''
    Strip the site-specific prefix from a queue name.

    :raises ValueError: if the given name is not prefixed.

    .. seealso:: :py:func:`add_queue_name_prefix`
    '''
    prefix = _get_queue_name_prefix()
    if name.startswith(prefix):
        return name[len(prefix):]
    raise ValueError(u'Queue name "{}" is not prefixed.'.format(name))
def get_all_queues():
    u'''
    Return all job queues currently in use.

    :returns: The queues.
    :rtype: List of ``rq.queue.Queue`` instances

    .. seealso:: :py:func:`get_queue`
    '''
    redis_conn = _connect()
    prefix = _get_queue_name_prefix()
    # Only queues belonging to this CKAN site (i.e. carrying our prefix).
    all_queues = rq.Queue.all(connection=redis_conn)
    return [queue for queue in all_queues if queue.name.startswith(prefix)]
def get_queue(name=DEFAULT_QUEUE_NAME):
    u'''
    Get a job queue.

    The job queue is initialized if that hasn't happened before.

    :param string name: The name of the queue. If not given then the
        default queue is returned.

    :returns: The job queue.
    :rtype: ``rq.queue.Queue``

    .. seealso:: :py:func:`get_all_queues`
    '''
    global _queues
    fullname = add_queue_name_prefix(name)
    queue = _queues.get(fullname)
    if queue is None:
        log.debug(u'Initializing background job queue "{}"'.format(name))
        redis_conn = _connect()
        queue = _queues[fullname] = rq.Queue(fullname, connection=redis_conn)
    return queue
def enqueue(fn, args=None, kwargs=None, title=None, queue=DEFAULT_QUEUE_NAME):
    u'''
    Enqueue a job to be run in the background.

    :param function fn: Function to be executed in the background

    :param list args: List of arguments to be passed to the function.
        Pass an empty list if there are no arguments (default).

    :param dict kwargs: Dict of keyword arguments to be passed to the
        function. Pass an empty dict if there are no keyword arguments
        (default).

    :param string title: Optional human-readable title of the job.

    :param string queue: Name of the queue. If not given then the
        default queue is used.

    :returns: The enqueued job.
    :rtype: ``rq.job.Job``
    '''
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    target_queue = get_queue(queue)
    job = target_queue.enqueue_call(func=fn, args=args, kwargs=kwargs)
    # Store the human-readable title alongside RQ's own job metadata.
    job.meta[u'title'] = title
    job.save()
    title_part = u' ("{}")'.format(title) if title else u''
    log.info(u'Added background job {}{} to queue "{}"'.format(
        job.id, title_part, queue))
    return job
def job_from_id(id):
    u'''
    Look up an enqueued job by its ID.

    :param string id: The ID of the job.

    :returns: The job.
    :rtype: ``rq.job.Job``

    :raises KeyError: if no job with that ID exists.
    '''
    conn = _connect()
    try:
        return Job.fetch(id, connection=conn)
    except NoSuchJobError:
        raise KeyError(u'There is no job with ID "{}".'.format(id))
def dictize_job(job):
    u'''Convert a job to a dict.

    In contrast to ``rq.job.Job.to_dict`` this function includes only
    the attributes that are relevant to our use case and promotes the
    meta attributes that we use (e.g. ``title``).

    :param rq.job.Job job: The job to dictize.

    :returns: The dictized job.
    :rtype: dict
    '''
    dictized = {
        u'id': job.id,
        u'title': job.meta.get(u'title'),
        u'created': job.created_at.strftime(u'%Y-%m-%dT%H:%M:%S'),
        u'queue': remove_queue_name_prefix(job.origin),
    }
    return dictized
def test_job(*args):
    u'''Test job.

    A debugging helper that simply echoes its arguments to stdout.
    Can be scheduled via ``paster jobs test``.
    '''
    print(args)
class Worker(rq.Worker):
    u'''
    CKAN-specific worker.

    Note that starting an instance of this class (via the ``work``
    method) disposes the currently active database engine and the
    associated session. This is necessary to prevent their corruption by
    the forked worker process. Both the engine and the session
    automatically re-initialize afterwards once they are used. However,
    non-committed changes are rolled back and instance variables bound
    to the old session have to be re-fetched from the database.
    '''
    def __init__(self, queues=None, *args, **kwargs):
        u'''
        Constructor.

        Accepts the same arguments as the constructor of
        ``rq.worker.Worker``. However, the behavior of the ``queues``
        parameter is different.

        :param queues: The job queue(s) to listen on. Can be a string
            with the name of a single queue or a list of queue names.
            If not given then the default queue is used.
        '''
        queues = queues or [DEFAULT_QUEUE_NAME]
        queues = [get_queue(q) for q in ensure_list(queues)]
        rq.worker.logger.setLevel(logging.INFO)
        super(Worker, self).__init__(queues, *args, **kwargs)

    def register_birth(self, *args, **kwargs):
        # Log the worker's startup (with its queues) after RQ has
        # registered it in Redis.
        result = super(Worker, self).register_birth(*args, **kwargs)
        names = [remove_queue_name_prefix(n) for n in self.queue_names()]
        names = u', '.join(u'"{}"'.format(n) for n in names)
        log.info(u'Worker {} (PID {}) has started on queue(s) {} '.format(
            self.key, self.pid, names))
        return result

    def execute_job(self, job, *args, **kwargs):
        # We shut down all database connections and the engine to make sure
        # that they are not shared with the child process and closed there
        # while still being in use in the main process, see
        #
        #     https://github.com/ckan/ckan/issues/3365
        #
        # Note that this rolls back any non-committed changes in the session.
        # Both `Session` and `engine` automatically re-initialize themselves
        # when they are used the next time.
        log.debug(u'Disposing database engine before fork')
        meta.Session.remove()
        meta.engine.dispose()

        # The original implementation performs the actual fork
        queue = remove_queue_name_prefix(job.origin)
        log.info(u'Worker {} starts job {} from queue "{}"'.format(
            self.key, job.id, queue))
        result = super(Worker, self).execute_job(job, *args, **kwargs)
        log.info(u'Worker {} has finished job {} from queue "{}"'.format(
            self.key, job.id, queue))
        return result

    def register_death(self, *args, **kwargs):
        # Log the shutdown after RQ has removed the worker from Redis.
        result = super(Worker, self).register_death(*args, **kwargs)
        log.info(u'Worker {} (PID {}) has stopped'.format(self.key, self.pid))
        return result

    def handle_exception(self, job, *exc_info):
        # Log the failure (with traceback) before delegating to RQ's
        # standard exception handling.
        log.exception(u'Job {} on worker {} raised an exception: {}'.format(
            job.id, self.key, exc_info[1]))
        return super(Worker, self).handle_exception(job, *exc_info)

    def main_work_horse(self, job, queue):
        # This method is called in a worker's work horse process right
        # after forking.
        load_environment(config[u'global_conf'], config)
        return super(Worker, self).main_work_horse(job, queue)

    def perform_job(self, *args, **kwargs):
        result = super(Worker, self).perform_job(*args, **kwargs)
        # rq.Worker.main_work_horse does a hard exit via os._exit directly
        # after its call to perform_job returns. Hence here is the correct
        # location to clean up.
        try:
            meta.Session.remove()
        except Exception:
            log.exception(u'Error while closing database session')
        try:
            meta.engine.dispose()
        except Exception:
            log.exception(u'Error while disposing database engine')
        return result
| 31.832215 | 78 | 0.652751 |
40d05e713c173906fd9e5ba96a9335042741b960 | 18,115 | py | Python | src/backend/utils/terraform.py | ddddhm1/LuWu | f9feaf10a6aca0dd31f250741a1c542ee5256633 | [
"Apache-2.0"
] | 658 | 2019-04-29T02:46:02.000Z | 2022-03-30T03:58:42.000Z | src/backend/utils/terraform.py | ddddhm1/LuWu | f9feaf10a6aca0dd31f250741a1c542ee5256633 | [
"Apache-2.0"
] | 9 | 2020-06-04T13:38:58.000Z | 2022-02-27T21:23:29.000Z | src/backend/utils/terraform.py | ddddhm1/LuWu | f9feaf10a6aca0dd31f250741a1c542ee5256633 | [
"Apache-2.0"
] | 130 | 2019-05-02T23:42:58.000Z | 2022-03-24T04:35:37.000Z | import base64
import contextlib
import logging
import time
from tempfile import NamedTemporaryFile
from tempfile import TemporaryDirectory
from typing import List
from python_terraform import IsFlagged
from python_terraform import Terraform as TF
from terrascript import Data
from terrascript import Output
from terrascript import Provider
from terrascript import Resource
from terrascript import Terrascript
from terrascript import provider
from terrascript import provisioner
from terrascript import resource
from terrascript import terraform
from core.config import PROJECT_NAME
from core.config import TERRAFORM_WORK_DIR
from utils.template import TemplateRender
class Terraform:
    """Wrapper around terraform via ``python_terraform`` and ``terrascript``.

    Builds terrascript configurations for several cloud providers
    (DigitalOcean, Vultr, Tencent Cloud, Alibaba Cloud) and for
    docker/nginx site deployments, and runs ``terraform plan`` /
    ``apply`` / ``destroy`` on them in throwaway workspaces.
    """

    # Docker / nginx deployment defaults.
    DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
    DEFAULT_DOCKER_ENTRYPOINT_PATH = "/docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = "/nginx.docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_IMAGE = "nginx:stable-alpine"
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = "/usr/share/nginx/html"
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # SSH connection defaults.
    DEFAULT_SSH_USER = "root"
    DEFAULT_SSH_PORT = 22
    # Name of terraform's "file" provisioner.
    TERRAFORM_RESOURCE_FILE = "file"

    # Trick for terrascript: it derives the terraform resource/provider/data
    # name from the Python class name, so these empty subclasses declare
    # types that are not shipped with terrascript itself.
    class null_resource(Resource):
        ...

    class tencentcloud(Provider):
        ...

    class tencentcloud_availability_zones(Data):
        ...

    class tencentcloud_images(Data):
        ...

    class tencentcloud_instance_types(Data):
        ...

    class tencentcloud_security_group(Resource):
        ...

    class tencentcloud_security_group_lite_rule(Resource):
        ...

    class tencentcloud_instance(Resource):
        ...

    class tencentcloud_key_pair(Resource):
        ...

    class alicloud(Provider):
        ...

    class alicloud_vpc(Resource):
        ...

    class alicloud_key_pair(Resource):
        ...

    class alicloud_security_group(Resource):
        ...

    class alicloud_security_group_rule(Resource):
        ...

    class alicloud_instance(Resource):
        ...

    class alicloud_vswitch(Resource):
        ...

    class alicloud_zones(Data):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Create a throwaway terraform workspace and yield a temp dir.

        The workspace and the temporary directory are cleaned up even when
        the body raises (the previous implementation skipped cleanup on
        exceptions because the yield was not wrapped in try/finally).
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            # Always switch back to the default workspace and drop the
            # throwaway one, no matter how the body exited.
            self.app.set_workspace("default")
            self.app.cmd("workspace delete", workspace, force=IsFlagged)
            tmp_dir.cleanup()

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        """Placeholder context manager for patching the docker SSH config.

        Currently a no-op; the patch/cleanup steps are not implemented yet.
        """
        # TODO: patch ssh config before yielding
        yield
        # TODO: clear ssh config afterwards

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Write ``config`` as ``*.tf.json`` into ``dir_path`` and run init.

        :param config: the terrascript configuration to serialize.
        :param dir_path: directory that terraform will be initialized in.
        :returns: the open :class:`NamedTemporaryFile` holding the config
            (``delete=False``, so it survives the handle).
        """
        tmp_config_file = NamedTemporaryFile(
            mode="wt", suffix=".tf.json", dir=dir_path, delete=False
        )
        logging.info(str(config))
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)  # flushes the buffer so terraform sees the file
        self.app.init(
            dir_path
            # manual plugin dir disabled because the plugin layout changes too fast
            # dir_path, plugin_dir=f"{self.work_dir}/plugins",
        )
        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Run ``terraform plan`` for ``config`` in a throwaway workspace."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        """Run ``terraform apply`` for ``config`` and return its outputs.

        :returns: dict mapping each terraform output name to its value.
        """
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)
            # terraform returns {"name": {"value": ...}}; flatten to values.
            output_var = {
                output_var_key: output_result[output_var_key]["value"]
                for output_var_key in output_result
            }
        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        """Run ``terraform destroy`` for ``config``."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(
        cls, config_data: dict, token: str, public_key: str = None
    ):
        """Build a terrascript config for a single DigitalOcean droplet.

        :param config_data: expects keys ``os_code``, ``hostname``,
            ``region_code``, ``plan_code`` and optionally ``ssh_keys``.
        :param token: DigitalOcean API token.
        :param public_key: optional SSH public key to register and attach.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"] if config_data.get("ssh_keys") else [],
        )
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key", name="default", public_key=public_key,
            )
            do_droplet_resource["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key
        # Expose the droplet's IP and ID as terraform outputs.
        do_output_ip = Output("ip", value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output("server_id", value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id
        return do_config

    @classmethod
    def gen_vultr_config(cls, config_data: dict, token: str, public_key: str = None):
        """Build a terrascript config for a single Vultr server.

        :param config_data: expects keys ``plan_code``, ``region_code``,
            ``os_code``, ``hostname`` and optionally ``ssh_keys``.
        :param token: Vultr API key.
        :param public_key: optional SSH public key to register and attach.
        """
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token, rate_limit=700, retry_limit=3)
        vultr_server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"] if config_data.get("ssh_keys") else [],
        )
        # Expose the server's IP and ID as terraform outputs.
        vultr_output_ip = Output("ip", value="${vultr_server.server.main_ip}")
        vultr_output_id = Output("server_id", value="${vultr_server.server.id}")
        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key(
                "vultr_ssh_key", name="default_key", ssh_key=public_key
            )
            vultr_server["ssh_key_ids"] += ["${vultr_ssh_key.vultr_ssh_key.id}"]
            vultr_config += vultr_ssh_key
        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id
        return vultr_config

    @classmethod
    def gen_tencent_cloud_config(
        cls,
        config_data: dict,
        token: str,
        public_key_name: str = None,
        secret_id: str = None,
    ):
        """Build a terrascript config for a single Tencent Cloud instance.

        :param config_data: expects keys ``region_code``, ``hostname``,
            ``os_code`` and ``plan_code``.
        :param token: Tencent Cloud secret key.
        :param public_key_name: optional name of a pre-registered key pair.
        :param secret_id: Tencent Cloud secret ID (paired with ``token``).
        """
        tencent_cloud_config = Terrascript()
        tencent_terraform = terraform(
            **{
                "required_providers": {
                    "tencentcloud": {
                        "source": "terraform-providers/tencentcloud",
                        "version": "~> 1.40.3",
                    },
                }
            }
        )
        tencent_cloud_provider = cls.tencentcloud(
            secret_id=secret_id, secret_key=token, region=config_data["region_code"],
        )
        tencent_zone = cls.tencentcloud_availability_zones("default")
        tencent_security_group = cls.tencentcloud_security_group(
            "default", name="all-open", description="open all ports"
        )
        tencent_security_group_rule = cls.tencentcloud_security_group_lite_rule(
            "rule",
            security_group_id="${tencentcloud_security_group.default.id}",
            ingress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
            egress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
        )
        tencent_cloud_server = cls.tencentcloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.tencentcloud_availability_zones.default.zones.0.name}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            disable_monitor_service=True,
            disable_security_service=True,
            allocate_public_ip=True,
            internet_max_bandwidth_out=5,
            instance_charge_type="POSTPAID_BY_HOUR",
            internet_charge_type="TRAFFIC_POSTPAID_BY_HOUR",
            system_disk_type="CLOUD_SSD",
            count=1,
        )
        # Expose the instance's public IP and ID as terraform outputs.
        tencent_output_ip = Output(
            "ip", value="${tencentcloud_instance.server.0.public_ip}"
        )
        tencent_output_id = Output(
            "server_id", value="${tencentcloud_instance.server.0.id}"
        )
        if public_key_name:
            tencent_cloud_server["key_name"] = public_key_name
        tencent_cloud_config += tencent_terraform
        tencent_cloud_config += tencent_cloud_provider
        tencent_cloud_config += tencent_zone
        tencent_cloud_config += tencent_security_group
        tencent_cloud_config += tencent_security_group_rule
        tencent_cloud_config += tencent_cloud_server
        tencent_cloud_config += tencent_output_ip
        tencent_cloud_config += tencent_output_id
        return tencent_cloud_config

    @classmethod
    def gen_ali_cloud_config(
        cls,
        config_data: dict,
        token: str,
        ssh_key_name: str = None,
        access_key: str = None,
        security_groups: List[str] = None,
    ):
        """Build a terrascript config for a single Alibaba Cloud instance.

        :param config_data: expects keys ``region_code``, ``hostname``,
            ``os_code`` and ``plan_code``.
        :param token: Alibaba Cloud secret key.
        :param ssh_key_name: optional name of a pre-registered key pair.
        :param access_key: Alibaba Cloud access key (paired with ``token``).
        :param security_groups: currently unused; the hard-wired "all-open"
            group created below is attached instead. Default changed from a
            mutable ``[]`` to ``None`` to avoid the shared-default pitfall.
        """
        security_groups = security_groups or []
        ali_cloud_config = Terrascript()
        ali_cloud_provider = cls.alicloud(
            access_key=access_key, secret_key=token, region=config_data["region_code"],
        )
        ali_zone = cls.alicloud_zones(
            "default",
            available_disk_category="cloud_efficiency",
            available_resource_creation="Instance",
        )
        ali_vpc = cls.alicloud_vpc("vpc", cidr_block="172.16.0.0/12",)
        ali_vswitch = cls.alicloud_vswitch(
            "vswitch",
            vpc_id="${alicloud_vpc.vpc.id}",
            cidr_block="172.16.0.0/29",
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
        )
        ali_security_group = cls.alicloud_security_group(
            "group",
            name="all-open",
            vpc_id="${alicloud_vpc.vpc.id}",
            description="open all ports",
            inner_access_policy="Accept",
        )
        ali_internet_security_group_rule = cls.alicloud_security_group_rule(
            "internet",
            # nic_type="internet",
            security_group_id="${alicloud_security_group.group.id}",
            type="ingress",
            port_range="-1/-1",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
        )
        ali_intranet_security_group_rule = cls.alicloud_security_group_rule(
            "intranet",
            # nic_type="intranet",
            security_group_id="${alicloud_security_group.group.id}",
            port_range="-1/-1",
            type="egress",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
            priority=1,
        )
        ali_cloud_server = cls.alicloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
            # security_groups=security_groups,
            security_groups="${alicloud_security_group.group.*.id}",
            vswitch_id="${alicloud_vswitch.vswitch.id}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            security_enhancement_strategy="Deactive",
            instance_charge_type="PostPaid",
            internet_charge_type="PayByTraffic",
            internet_max_bandwidth_out=2,
        )
        # Expose the instance's public IP and ID as terraform outputs.
        ali_output_ip = Output("ip", value="${alicloud_instance.server.public_ip}")
        ali_output_id = Output("server_id", value="${alicloud_instance.server.id}")
        if ssh_key_name:
            ali_cloud_server["key_name"] = ssh_key_name
        ali_cloud_config += ali_cloud_provider
        ali_cloud_config += ali_zone
        ali_cloud_config += ali_vpc
        ali_cloud_config += ali_vswitch
        ali_cloud_config += ali_security_group
        ali_cloud_config += ali_internet_security_group_rule
        ali_cloud_config += ali_intranet_security_group_rule
        ali_cloud_config += ali_cloud_server
        ali_cloud_config += ali_output_ip
        ali_cloud_config += ali_output_id
        return ali_cloud_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Build a ``remote-exec`` provisioner that appends ``public_key``
        to the remote user's ``~/.ssh/authorized_keys``.
        """
        # NOTE(review): the nested provisioner-in-provisioner structure is
        # kept as-is — verify the JSON it produces against the terraform
        # provisioner docs.
        return provisioner(
            "remote-exec",
            provisioner=provisioner(
                "remote-exec",
                inline=[
                    "mkdir -p ~/.ssh",
                    # ``echo`` was missing here before, which would have
                    # executed the key material as a shell command instead
                    # of appending it to authorized_keys.
                    f"echo '{public_key}' >> ~/.ssh/authorized_keys",
                ],
            ),
        )

    @classmethod
    def gen_ssh_conn_config(
        cls,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ) -> dict:
        """Build a terraform ``connection`` block for SSH.

        See https://www.terraform.io/docs/provisioners/connection.html
        """
        return {
            "type": "ssh",
            "user": ssh_user,
            "private_key": ssh_private_key,
            "host": ssh_host,
            "port": ssh_port,
            "timeout": "30s",
        }

    @classmethod
    def gen_site_docker_deploy_config(
        cls,
        *,
        docker_host: str = DEFAULT_DOCKER_HOST,
        site_name: str = None,
        template_tar_bytes: bytes = None,
        script: str = None,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build the config that deploys a static site in an nginx container
        on a remote docker host reached over SSH.

        :param docker_host: docker daemon address on the remote machine.
        :param site_name: used to name the container and tarball resources.
        :param template_tar_bytes: optional gzipped site tarball, uploaded
            into the container (base64-encoded for the docker provider).
        :param script: optional init script; when given, an entrypoint
            script rendered from the nginx template is uploaded too.
        """
        config = Terrascript()
        docker_provider = provider.docker(
            host=docker_host,
            connection=cls.gen_ssh_conn_config(
                ssh_user=ssh_user,
                ssh_private_key=ssh_private_key,
                ssh_host=ssh_host,
                ssh_port=ssh_port,
            ),
        )
        docker_image_resource = resource.docker_image(
            "nginx_image", name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        # ``${{...}}`` renders as a literal ``${...}`` terraform interpolation.
        docker_container_resource = resource.docker_container(
            "nginx_container",
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={"internal": 80},
            upload=[],
        )
        docker_name_resource = resource.random_pet("docker_pet_name", length=1,)
        if template_tar_bytes:
            # Fixed: this used to be a 1-tuple (stray trailing comma) and the
            # path below interpolated it with a spurious leading "$".
            template_tar_file = (
                f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz"
            )
            template_tar_file_content = base64.b64encode(template_tar_bytes).decode(
                "utf8"
            )
            template_tar_path = (
                f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            )
            docker_container_resource["upload"].append(
                {"content_base64": template_tar_file_content, "file": template_tar_path}
            )
        if script:
            # Render the nginx entrypoint script and ship it into the container.
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH,
            )
            docker_container_resource["upload"].append(
                {
                    "content": entrypoint_sh_content,
                    "file": cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                }
            )
        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource
        return config

    def remote_exec(
        self,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config whose null_resource runs a ``remote-exec``
        provisioner (currently a simple ``ls -la``) over SSH.
        """
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        exec_resource = self.null_resource(
            "remote-exec",
            provisioner=provisioner(
                "remote-exec", inline=["ls -la"], connection=ssh_conn
            ),
        )
        exec_config += exec_resource
        return exec_config

    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config that uploads ``content`` to ``destination`` on the
        remote host via terraform's ``file`` provisioner over SSH.
        """
        upload_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        file_resource = self.null_resource(
            "upload_file_resource",
            provisioner=provisioner(
                self.TERRAFORM_RESOURCE_FILE,
                content=content,
                destination=destination,
                connection=ssh_conn,
            ),
        )
        upload_config += file_resource
        return upload_config
| 32.996357 | 93 | 0.606238 |
172ec2af796e9f2949b68d10d19c929f4f97c96e | 12,519 | py | Python | var/spack/repos/builtin/packages/octave/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-10-04T20:05:45.000Z | 2021-10-04T20:05:45.000Z | var/spack/repos/builtin/packages/octave/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2021-05-12T05:53:01.000Z | 2022-03-18T17:30:25.000Z | var/spack/repos/builtin/packages/octave/package.py | mtmiller/spack | c97c135f1dbe24955048fcc4f0f98281ef0c9300 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-11-06T06:38:51.000Z | 2020-10-27T07:45:01.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import re
import shutil
import sys
import tempfile
import spack.util.environment
class Octave(AutotoolsPackage, GNUMirrorPackage):
    """GNU Octave is a high-level language, primarily intended for numerical
    computations.

    It provides a convenient command line interface for solving linear and
    nonlinear problems numerically, and for performing other numerical
    experiments using a language that is mostly compatible with Matlab.
    It may also be used as a batch-oriented language.
    """

    homepage = "https://www.gnu.org/software/octave/"
    gnu_mirror_path = "octave/octave-4.0.0.tar.gz"
    maintainers = ['mtmiller']

    # Octave packages (e.g. octave-* Spack packages) install into this prefix.
    extendable = True

    version('6.3.0', sha256='232065f3a72fc3013fe9f17f429a3df69d672c1f6b6077029a31c8f3cd58a66e')
    version('6.2.0', sha256='457d1fda8634a839e2fd7cfc55b98bd56f36b6ae73d31bb9df43dde3012caa7c')
    version('6.1.0', sha256='6ff34e401658622c44094ecb67e497672e4337ca2d36c0702d0403ecc60b0a57')
    version('5.2.0', sha256='2fea62b3c78d6f38e9451da8a4d26023840725977dffee5250d3d180f56595e1')
    version('5.1.0', sha256='e36b1124cac27c7caa51cc57de408c31676d5f0096349b4d50b57bfe1bcd7495')
    version('4.4.1', sha256='09fbd0f212f4ef21e53f1d9c41cf30ce3d7f9450fb44911601e21ed64c67ae97')
    version('4.4.0', sha256='72f846379fcec7e813d46adcbacd069d72c4f4d8f6003bcd92c3513aafcd6e96')
    version('4.2.2', sha256='77b84395d8e7728a1ab223058fe5e92dc38c03bc13f7358e6533aab36f76726e')
    version('4.2.1', sha256='80c28f6398576b50faca0e602defb9598d6f7308b0903724442c2a35a605333b')
    version('4.2.0', sha256='443ba73782f3531c94bcf016f2f0362a58e186ddb8269af7dcce973562795567')
    version('4.0.2', sha256='39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1')
    version('4.0.0', sha256='4c7ee0957f5dd877e3feb9dfe07ad5f39b311f9373932f0d2a289dc97cca3280')

    # patches
    # see https://savannah.gnu.org/bugs/?50234
    patch('patch_4.2.1_inline.diff', when='@4.2.1')

    # Variants
    variant('readline',   default=True)
    variant('arpack',     default=False)
    variant('curl',       default=False)
    variant('fftw',       default=False)
    variant('fltk',       default=False)
    variant('fontconfig', default=False)
    variant('freetype',   default=False)
    variant('glpk',       default=False)
    variant('gl2ps',      default=False)
    variant('gnuplot',    default=False)
    variant('magick',     default=False)
    variant('hdf5',       default=False)
    variant('jdk',        default=False)
    variant('llvm',       default=False)
    variant('opengl',     default=False)
    variant('qhull',      default=False)
    variant('qrupdate',   default=False)
    variant('qscintilla', default=False)
    variant('qt',         default=False)
    variant('suitesparse', default=False)
    variant('zlib',       default=False)

    # Required dependencies
    depends_on('blas')
    depends_on('lapack')
    # Octave does not configure with sed from darwin:
    depends_on('sed', when=sys.platform == 'darwin', type='build')
    depends_on('pcre')
    depends_on('pkgconfig', type='build')

    # Strongly recommended dependencies
    depends_on('readline',     when='+readline')

    # Optional dependencies
    depends_on('arpack-ng',    when='+arpack')
    depends_on('curl',         when='+curl')
    depends_on('fftw',         when='+fftw')
    depends_on('fltk',         when='+fltk')
    depends_on('fontconfig',   when='+fontconfig')
    depends_on('freetype',     when='+freetype')
    depends_on('glpk',         when='+glpk')
    depends_on('gl2ps',        when='+gl2ps')
    depends_on('gnuplot',      when='+gnuplot')
    depends_on('imagemagick',  when='+magick')
    depends_on('hdf5',         when='+hdf5')
    depends_on('java',         when='+jdk')        # TODO: requires Java 6 ?
    depends_on('llvm',         when='+llvm')
    depends_on('gl',           when='+opengl')
    depends_on('gl',           when='+fltk')
    depends_on('qhull',        when='+qhull')
    depends_on('qrupdate',     when='+qrupdate')
    depends_on('qscintilla',   when='+qscintilla')
    depends_on('qt+opengl',    when='+qt')
    depends_on('suite-sparse', when='+suitesparse')
    depends_on('zlib',         when='+zlib')

    def patch(self):
        """Point mkoctfile at the underlying compilers, not Spack wrappers.

        Filter mkoctfile.in.cc to use underlying compilers and not
        Spack compiler wrappers. We are patching the template file
        and not mkoctfile.cc since the latter is generated as part
        of the build.
        """
        mkoctfile_in = os.path.join(
            self.stage.source_path, 'src', 'mkoctfile.in.cc'
        )

        # PEP 8 (E731): use a def instead of assigning a lambda to a name.
        def quote(s):
            return '"' + s + '"'

        entries_to_patch = {
            r'%OCTAVE_CONF_MKOCTFILE_CC%': quote(self.compiler.cc),
            r'%OCTAVE_CONF_MKOCTFILE_CXX%': quote(self.compiler.cxx),
            r'%OCTAVE_CONF_MKOCTFILE_F77%': quote(self.compiler.f77),
            r'%OCTAVE_CONF_MKOCTFILE_DL_LD%': quote(self.compiler.cxx),
            r'%OCTAVE_CONF_MKOCTFILE_LD_CXX%': quote(self.compiler.cxx)
        }

        for pattern, subst in entries_to_patch.items():
            filter_file(pattern, subst, mkoctfile_in)

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_mkoctfile_works_outside_of_build_env(self):
        """Check that the installed mkoctfile is properly configured and can
        compile Octave extensions outside of the build env."""
        mkoctfile = Executable(os.path.join(self.prefix, 'bin', 'mkoctfile'))

        helloworld_cc = os.path.join(
            os.path.dirname(__file__), 'helloworld.cc'
        )
        tmp_dir = tempfile.mkdtemp()
        shutil.copy(helloworld_cc, tmp_dir)

        # We need to unset these variables since we are still within
        # Spack's build environment when running tests
        vars_to_unset = ['CC', 'CXX', 'F77', 'FC']

        with spack.util.environment.preserve_environment(*vars_to_unset):
            # Delete temporarily the environment variables that point
            # to Spack compiler wrappers
            for v in vars_to_unset:
                del os.environ[v]
            # Check that mkoctfile outputs the expected value for CC
            cc = mkoctfile('-p', 'CC', output=str)
            msg = "mkoctfile didn't output the expected CC compiler"
            assert self.compiler.cc in cc, msg

            # Try to compile an Octave extension
            shutil.copy(helloworld_cc, tmp_dir)
            with working_dir(tmp_dir):
                mkoctfile('helloworld.cc')

    def configure_args(self):
        """Translate the variants into ./configure flags.

        See
        https://github.com/macports/macports-ports/blob/master/math/octave/
        https://github.com/Homebrew/homebrew-science/blob/master/octave.rb
        """
        spec = self.spec
        config_args = []

        # Required dependencies
        if '^mkl' in spec and 'gfortran' in self.compiler.fc:
            # gfortran calling conventions need the mkl_gf_* variants of MKL
            mkl_re = re.compile(r'(mkl_)intel(_i?lp64\b)')
            config_args.extend([
                mkl_re.sub(r'\g<1>gf\g<2>',
                           '--with-blas={0}'.format(
                               spec['blas'].libs.ld_flags)),
                '--with-lapack'
            ])
        else:
            config_args.extend([
                '--with-blas={0}'.format(spec['blas'].libs.ld_flags),
                '--with-lapack={0}'.format(spec['lapack'].libs.ld_flags)
            ])

        # Strongly recommended dependencies
        if '+readline' in spec:
            config_args.append('--enable-readline')
        else:
            config_args.append('--disable-readline')

        # Optional dependencies
        if '+arpack' in spec:
            sa = spec['arpack-ng']
            config_args.extend([
                "--with-arpack-includedir=%s" % sa.prefix.include,
                "--with-arpack-libdir=%s" % sa.prefix.lib
            ])
        else:
            config_args.append("--without-arpack")

        if '+curl' in spec:
            config_args.extend([
                "--with-curl-includedir=%s" % spec['curl'].prefix.include,
                "--with-curl-libdir=%s" % spec['curl'].prefix.lib
            ])
        else:
            config_args.append("--without-curl")

        if '+fftw' in spec:
            config_args.extend([
                "--with-fftw3-includedir=%s" % spec['fftw'].prefix.include,
                "--with-fftw3-libdir=%s" % spec['fftw'].prefix.lib,
                "--with-fftw3f-includedir=%s" % spec['fftw'].prefix.include,
                "--with-fftw3f-libdir=%s" % spec['fftw'].prefix.lib
            ])
        else:
            config_args.extend([
                "--without-fftw3",
                "--without-fftw3f"
            ])

        if '+fltk' in spec:
            config_args.extend([
                "--with-fltk-prefix=%s" % spec['fltk'].prefix,
                "--with-fltk-exec-prefix=%s" % spec['fltk'].prefix
            ])
        else:
            config_args.append("--without-fltk")

        if '+glpk' in spec:
            config_args.extend([
                "--with-glpk-includedir=%s" % spec['glpk'].prefix.include,
                "--with-glpk-libdir=%s" % spec['glpk'].prefix.lib
            ])
        else:
            config_args.append("--without-glpk")

        if '+magick' in spec:
            config_args.append("--with-magick=%s"
                               % spec['imagemagick'].prefix.lib)
        else:
            config_args.append("--without-magick")

        if '+hdf5' in spec:
            config_args.extend([
                "--with-hdf5-includedir=%s" % spec['hdf5'].prefix.include,
                "--with-hdf5-libdir=%s" % spec['hdf5'].prefix.lib
            ])
        else:
            config_args.append("--without-hdf5")

        if '+jdk' in spec:
            config_args.extend([
                "--with-java-homedir=%s" % spec['java'].home,
                "--with-java-includedir=%s" % spec['java'].home.include,
                "--with-java-libdir=%s" % spec['java'].libs.directories[0]
            ])
        else:
            config_args.append("--disable-java")

        # BUG FIX: the original condition was
        #   if '~opengl' and '~fltk' in spec:
        # where '~opengl' is a non-empty (always-truthy) string literal, so
        # only the fltk part was ever tested. Check both variants explicitly.
        if '~opengl' in spec and '~fltk' in spec:
            config_args.extend([
                "--without-opengl",
                "--without-framework-opengl"
            ])

        # TODO: opengl dependency and package is missing?

        if '+qhull' in spec:
            config_args.extend([
                "--with-qhull-includedir=%s" % spec['qhull'].prefix.include,
                "--with-qhull-libdir=%s" % spec['qhull'].prefix.lib
            ])
        else:
            config_args.append("--without-qhull")

        if '+qrupdate' in spec:
            config_args.extend([
                "--with-qrupdate-includedir=%s"
                % spec['qrupdate'].prefix.include,
                "--with-qrupdate-libdir=%s" % spec['qrupdate'].prefix.lib
            ])
        else:
            config_args.append("--without-qrupdate")

        if '+zlib' in spec:
            config_args.extend([
                "--with-z-includedir=%s" % spec['zlib'].prefix.include,
                "--with-z-libdir=%s" % spec['zlib'].prefix.lib
            ])
        else:
            config_args.append("--without-z")

        # If 64-bit BLAS is used:
        if (spec.satisfies('^openblas+ilp64') or
                spec.satisfies('^intel-mkl+ilp64') or
                spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
            config_args.append('F77_INTEGER_8_FLAG=-fdefault-integer-8')

        # Use gfortran calling-convention %fj
        if spec.satisfies('%fj'):
            config_args.append('--enable-fortran-calling-convention=gfortran')

        return config_args

    # ========================================================================
    # Set up environment to make install easy for Octave extensions.
    # ========================================================================

    def setup_dependent_package(self, module, dependent_spec):
        """Called before Octave modules' install() methods.

        In most cases, extensions will only need to have one line:
            octave('--eval', 'pkg install %s' % self.stage.archive_file)
        """
        # Octave extension builds can have a global Octave executable function
        module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
| 40.383871 | 95 | 0.584951 |
c4e3d3d92d51b048d206c0c8409af00ab8856070 | 33,881 | py | Python | rasa/core/agent.py | wtoalabi/rasa | 1106845b5628dc1f739a09f75270926b572af918 | [
"Apache-2.0"
] | null | null | null | rasa/core/agent.py | wtoalabi/rasa | 1106845b5628dc1f739a09f75270926b572af918 | [
"Apache-2.0"
] | 2 | 2020-04-17T19:46:20.000Z | 2020-04-17T20:06:06.000Z | rasa/core/agent.py | wtoalabi/rasa | 1106845b5628dc1f739a09f75270926b572af918 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import shutil
import tempfile
import uuid
from asyncio import CancelledError
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import aiohttp
from sanic import Sanic
import rasa
import rasa.utils.io
import rasa.core.utils
from rasa.constants import (
DEFAULT_DOMAIN_PATH,
LEGACY_DOCS_BASE_URL,
ENV_SANIC_BACKLOG,
DEFAULT_CORE_SUBDIRECTORY_NAME,
)
from rasa.core import constants, jobs, training
from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage
from rasa.core.constants import DEFAULT_REQUEST_TIMEOUT
from rasa.core.domain import Domain
from rasa.core.exceptions import AgentNotReady
from rasa.core.interpreter import NaturalLanguageInterpreter, RegexInterpreter
from rasa.core.lock_store import LockStore, InMemoryLockStore
from rasa.core.nlg import NaturalLanguageGenerator
from rasa.core.policies.ensemble import PolicyEnsemble, SimplePolicyEnsemble
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.policies.policy import Policy
from rasa.core.processor import MessageProcessor
from rasa.core.tracker_store import (
InMemoryTrackerStore,
TrackerStore,
FailSafeTrackerStore,
)
from rasa.core.trackers import DialogueStateTracker
from rasa.exceptions import ModelNotFound
from rasa.importers.importer import TrainingDataImporter
from rasa.model import (
get_model_subdirectories,
get_latest_model,
unpack_model,
get_model,
)
from rasa.nlu.utils import is_url
from rasa.utils.common import raise_warning, update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
logger = logging.getLogger(__name__)
async def load_from_server(agent: "Agent", model_server: EndpointConfig) -> "Agent":
    """Load a persisted model from a server."""

    # Pull the model once synchronously before scheduling anything. This
    # guarantees a model was loaded (or at least attempted) by the time this
    # coroutine returns, so a server's `/status` "is alive" check is accurate
    # right after startup.
    await _update_model_from_server(model_server, agent)

    wait_time = model_server.kwargs.get("wait_time_between_pulls", 100)
    if wait_time:
        # keep polling the server every `wait_time` seconds
        await schedule_model_pulling(model_server, int(wait_time), agent)

    return agent
def _load_and_set_updated_model(
    agent: "Agent", model_directory: Text, fingerprint: Text
):
    """Load the persisted model into memory and set the model on the agent."""
    logger.debug(f"Found new model with fingerprint {fingerprint}. Loading...")

    core_path, nlu_path = get_model_subdirectories(model_directory)

    if not nlu_path:
        # no NLU sub-model: keep the agent's current interpreter, or fall back
        # to the regex interpreter if it has none
        interpreter = agent.interpreter
        if interpreter is None:
            interpreter = RegexInterpreter()
    else:
        # imported lazily, only when an NLU model is actually present
        from rasa.core.interpreter import RasaNLUInterpreter

        interpreter = RasaNLUInterpreter(model_directory=nlu_path)

    # load the domain outside the try below so its errors propagate
    domain = None
    if core_path:
        domain = Domain.load(
            os.path.join(os.path.abspath(core_path), DEFAULT_DOMAIN_PATH)
        )

    try:
        policy_ensemble = PolicyEnsemble.load(core_path) if core_path else None
        agent.update_model(
            domain, policy_ensemble, fingerprint, interpreter, model_directory
        )
        logger.debug("Finished updating agent to new model.")
    except Exception:
        # keep serving the previously loaded model on failure
        logger.exception(
            "Failed to load policy and update agent. "
            "The previous model will stay loaded instead."
        )
async def _update_model_from_server(
    model_server: EndpointConfig, agent: "Agent"
) -> None:
    """Load a zipped Rasa Core model from a URL and update the passed agent."""
    if not is_url(model_server.url):
        raise aiohttp.InvalidURL(model_server.url)

    pulled = await _pull_model_and_fingerprint(model_server, agent.fingerprint)

    if pulled is None:
        # server reported no newer model than the one we already run
        logger.debug(f"No new model found at URL {model_server.url}")
        return

    model_directory, new_fingerprint = pulled
    _load_and_set_updated_model(agent, model_directory, new_fingerprint)
async def _pull_model_and_fingerprint(
    model_server: EndpointConfig, fingerprint: Optional[Text]
) -> Optional[Tuple[Text, Text]]:
    """Queries the model server.

    Returns the temporary model directory and value of the response's <ETag> header
    which contains the model hash. Returns `None` if no new model is found.
    """
    # Send the current fingerprint so the server can answer with 204/304
    # when the model we already have is still the latest one.
    headers = {"If-None-Match": fingerprint}

    logger.debug(f"Requesting model from server {model_server.url}...")

    async with model_server.session() as session:
        try:
            params = model_server.combine_parameters()
            async with session.request(
                "GET",
                model_server.url,
                timeout=DEFAULT_REQUEST_TIMEOUT,
                headers=headers,
                params=params,
            ) as resp:

                if resp.status in [204, 304]:
                    # not modified: our fingerprint matches the server's model
                    logger.debug(
                        "Model server returned {} status code, "
                        "indicating that no new model is available. "
                        "Current fingerprint: {}"
                        "".format(resp.status, fingerprint)
                    )
                    return None
                elif resp.status == 404:
                    logger.debug(
                        "Model server could not find a model at the requested "
                        "endpoint '{}'. It's possible that no model has been "
                        "trained, or that the requested tag hasn't been "
                        "assigned.".format(model_server.url)
                    )
                    return None
                elif resp.status != 200:
                    # any other non-success status: give up, the scheduled
                    # pulling job will retry later
                    logger.debug(
                        "Tried to fetch model from server, but server response "
                        "status code is {}. We'll retry later..."
                        "".format(resp.status)
                    )
                    return None

                # 200 OK: the body is a model archive — unpack it into a
                # fresh temporary directory
                model_directory = tempfile.mkdtemp()
                rasa.utils.io.unarchive(await resp.read(), model_directory)
                logger.debug(
                    "Unzipped model to '{}'".format(os.path.abspath(model_directory))
                )

                # get the new fingerprint
                new_fingerprint = resp.headers.get("ETag")
                # return new tmp model directory and new fingerprint
                return model_directory, new_fingerprint

        except aiohttp.ClientError as e:
            # network-level failure (DNS, connection, timeout, ...) — treat as
            # "no new model" and let the recurring job retry
            logger.debug(
                "Tried to fetch model from server, but "
                "couldn't reach server. We'll retry later... "
                "Error: {}.".format(e)
            )
            return None
async def _run_model_pulling_worker(
    model_server: EndpointConfig, agent: "Agent"
) -> None:
    """Scheduled job body: pull the model once, never let errors escape.

    Exceptions are swallowed (after logging) so one failed pull does not
    kill the recurring scheduler job.
    """
    # noinspection PyBroadException
    try:
        await _update_model_from_server(model_server, agent)
    except CancelledError:
        logger.warning("Stopping model pulling (cancelled).")
    except Exception:
        logger.exception(
            "An exception was raised while fetching a model. Continuing anyways..."
        )
async def schedule_model_pulling(
    model_server: EndpointConfig, wait_time_between_pulls: int, agent: "Agent"
):
    """Register a recurring job that re-pulls the model from the server."""
    scheduler = await jobs.scheduler()
    # `replace_existing` keeps at most one pulling job alive per process
    scheduler.add_job(
        _run_model_pulling_worker,
        "interval",
        seconds=wait_time_between_pulls,
        args=[model_server, agent],
        id="pull-model-from-server",
        replace_existing=True,
    )
async def load_agent(
    model_path: Optional[Text] = None,
    model_server: Optional[EndpointConfig] = None,
    remote_storage: Optional[Text] = None,
    interpreter: Optional[NaturalLanguageInterpreter] = None,
    generator: Union[EndpointConfig, NaturalLanguageGenerator] = None,
    tracker_store: Optional[TrackerStore] = None,
    lock_store: Optional[LockStore] = None,
    action_endpoint: Optional[EndpointConfig] = None,
):
    """Load an agent, trying model server, remote storage, then local path.

    Returns `None` (after a warning) if no usable model source was given;
    re-raises any error encountered while loading.
    """
    # components shared by every loading strategy
    common = dict(
        interpreter=interpreter,
        generator=generator,
        tracker_store=tracker_store,
        lock_store=lock_store,
        action_endpoint=action_endpoint,
    )

    try:
        if model_server is not None:
            stub_agent = Agent(
                model_server=model_server, remote_storage=remote_storage, **common
            )
            return await load_from_server(stub_agent, model_server)

        if remote_storage is not None:
            return Agent.load_from_remote_storage(
                remote_storage, model_path, model_server=model_server, **common
            )

        if model_path is not None and os.path.exists(model_path):
            return Agent.load_local_model(
                model_path,
                model_server=model_server,
                remote_storage=remote_storage,
                **common,
            )

        raise_warning("No valid configuration given to load agent.")
        return None

    except Exception as e:
        logger.error(f"Could not load model due to {e}.")
        raise
class Agent:
"""The Agent class provides a convenient interface for the most important
Rasa functionality.
This includes training, handling messages, loading a dialogue model,
getting the next action, and handling a channel."""
    def __init__(
        self,
        domain: Union[Text, Domain, None] = None,
        policies: Union[PolicyEnsemble, List[Policy], None] = None,
        interpreter: Optional[NaturalLanguageInterpreter] = None,
        generator: Union[EndpointConfig, NaturalLanguageGenerator, None] = None,
        tracker_store: Optional[TrackerStore] = None,
        lock_store: Optional[LockStore] = None,
        action_endpoint: Optional[EndpointConfig] = None,
        fingerprint: Optional[Text] = None,
        model_directory: Optional[Text] = None,
        model_server: Optional[EndpointConfig] = None,
        remote_storage: Optional[Text] = None,
        path_to_model_archive: Optional[Text] = None,
    ):
        """Create an agent from its components.

        All arguments are optional; missing components get defaults (empty
        domain, in-memory tracker/lock store, regex interpreter via the
        respective `create` factories) or stay `None`.
        """
        # Initializing variables with the passed parameters.
        self.domain = self._create_domain(domain)
        self.policy_ensemble = self._create_ensemble(policies)

        # Built-in slots must be added to the domain *before* checking it
        # against the policy ensemble.
        if self.domain is not None:
            self.domain.add_requested_slot()
            self.domain.add_knowledge_base_slots()
            self.domain.add_categorical_slot_default_value()

        PolicyEnsemble.check_domain_ensemble_compatibility(
            self.policy_ensemble, self.domain
        )

        self.interpreter = NaturalLanguageInterpreter.create(interpreter)

        self.nlg = NaturalLanguageGenerator.create(generator, self.domain)
        self.tracker_store = self.create_tracker_store(tracker_store, self.domain)
        self.lock_store = self._create_lock_store(lock_store)
        self.action_endpoint = action_endpoint

        self._set_fingerprint(fingerprint)
        self.model_directory = model_directory
        self.model_server = model_server
        self.remote_storage = remote_storage
        self.path_to_model_archive = path_to_model_archive
    def update_model(
        self,
        domain: Optional[Domain],
        policy_ensemble: Optional[PolicyEnsemble],
        fingerprint: Optional[Text],
        interpreter: Optional[NaturalLanguageInterpreter] = None,
        model_directory: Optional[Text] = None,
    ) -> None:
        """Swap in a freshly loaded model (domain, policies, interpreter).

        Used by the model-server pulling machinery; a `None` interpreter
        keeps the current one.
        """
        self.domain = self._create_domain(domain)
        self.policy_ensemble = policy_ensemble

        if interpreter:
            self.interpreter = NaturalLanguageInterpreter.create(interpreter)

        self._set_fingerprint(fingerprint)

        # update domain on all instances
        self.tracker_store.domain = domain
        if hasattr(self.nlg, "templates"):
            self.nlg.templates = domain.templates if domain else {}

        self.model_directory = model_directory
    @classmethod
    def load(
        cls,
        model_path: Text,
        interpreter: Optional[NaturalLanguageInterpreter] = None,
        generator: Union[EndpointConfig, NaturalLanguageGenerator] = None,
        tracker_store: Optional[TrackerStore] = None,
        lock_store: Optional[LockStore] = None,
        action_endpoint: Optional[EndpointConfig] = None,
        model_server: Optional[EndpointConfig] = None,
        remote_storage: Optional[Text] = None,
        path_to_model_archive: Optional[Text] = None,
    ) -> "Agent":
        """Load a persisted model from the passed path.

        `model_path` may be a 'tar.gz' archive (it gets unpacked) or an
        already-unpacked model directory; raises `ValueError` for anything
        else.
        """
        try:
            if not model_path:
                raise ModelNotFound("No path specified.")
            elif not os.path.exists(model_path):
                raise ModelNotFound(f"No file or directory at '{model_path}'.")
            elif os.path.isfile(model_path):
                # an archive file — unpack it and continue with the directory
                model_path = get_model(model_path)
        except ModelNotFound:
            # translated to ValueError so callers get a uniform error type
            raise ValueError(
                "You are trying to load a MODEL from '{}', which is not possible. \n"
                "The model path should be a 'tar.gz' file or a directory "
                "containing the various model files in the sub-directories 'core' "
                "and 'nlu'. \n\nIf you want to load training data instead of "
                "a model, use `agent.load_data(...)` instead.".format(model_path)
            )

        core_model, nlu_model = get_model_subdirectories(model_path)

        # an explicitly passed interpreter takes precedence over the NLU model
        if not interpreter and nlu_model:
            interpreter = NaturalLanguageInterpreter.create(nlu_model)

        domain = None
        ensemble = None

        if core_model:
            domain = Domain.load(os.path.join(core_model, DEFAULT_DOMAIN_PATH))
            ensemble = PolicyEnsemble.load(core_model) if core_model else None

            # ensures the domain hasn't changed between test and train
            domain.compare_with_specification(core_model)

        return cls(
            domain=domain,
            policies=ensemble,
            interpreter=interpreter,
            generator=generator,
            tracker_store=tracker_store,
            lock_store=lock_store,
            action_endpoint=action_endpoint,
            model_directory=model_path,
            model_server=model_server,
            remote_storage=remote_storage,
            path_to_model_archive=path_to_model_archive,
        )
def is_core_ready(self) -> bool:
"""Check if all necessary components and policies are ready to use the agent.
"""
return self.is_ready() and self.policy_ensemble is not None
def is_ready(self) -> bool:
"""Check if all necessary components are instantiated to use agent.
Policies might not be available, if this is an NLU only agent."""
return self.tracker_store is not None and self.interpreter is not None
async def parse_message_using_nlu_interpreter(
self, message_data: Text, tracker: DialogueStateTracker = None
) -> Dict[Text, Any]:
"""Handles message text and intent payload input messages.
The return value of this function is parsed_data.
Args:
message_data (Text): Contain the received message in text or\
intent payload format.
tracker (DialogueStateTracker): Contains the tracker to be\
used by the interpreter.
Returns:
The parsed message.
Example:
{\
"text": '/greet{"name":"Rasa"}',\
"intent": {"name": "greet", "confidence": 1.0},\
"intent_ranking": [{"name": "greet", "confidence": 1.0}],\
"entities": [{"entity": "name", "start": 6,\
"end": 21, "value": "Rasa"}],\
}
"""
processor = self.create_processor()
message = UserMessage(message_data)
return await processor._parse_message(message, tracker)
async def handle_message(
self,
message: UserMessage,
message_preprocessor: Optional[Callable[[Text], Text]] = None,
**kwargs,
) -> Optional[List[Dict[Text, Any]]]:
"""Handle a single message."""
if not isinstance(message, UserMessage):
# DEPRECATION EXCEPTION - remove in 2.1
raise Exception(
"Passing a text to `agent.handle_message(...)` is "
"not supported anymore. Rather use `agent.handle_text(...)`.",
)
def noop(_):
logger.info("Ignoring message as there is no agent to handle it.")
return None
if not self.is_ready():
return noop(message)
processor = self.create_processor(message_preprocessor)
async with self.lock_store.lock(message.sender_id):
return await processor.handle_message(message)
# noinspection PyUnusedLocal
async def predict_next(
self, sender_id: Text, **kwargs: Any
) -> Optional[Dict[Text, Any]]:
"""Handle a single message."""
processor = self.create_processor()
return await processor.predict_next(sender_id)
# noinspection PyUnusedLocal
async def log_message(
self,
message: UserMessage,
message_preprocessor: Optional[Callable[[Text], Text]] = None,
**kwargs: Any,
) -> DialogueStateTracker:
"""Append a message to a dialogue - does not predict actions."""
processor = self.create_processor(message_preprocessor)
return await processor.log_message(message)
async def execute_action(
self,
sender_id: Text,
action: Text,
output_channel: OutputChannel,
policy: Text,
confidence: float,
) -> DialogueStateTracker:
"""Handle a single message."""
processor = self.create_processor()
return await processor.execute_action(
sender_id, action, output_channel, self.nlg, policy, confidence
)
async def trigger_intent(
self,
intent_name: Text,
entities: List[Dict[Text, Any]],
output_channel: OutputChannel,
tracker: DialogueStateTracker,
) -> None:
"""Trigger a user intent, e.g. triggered by an external event."""
processor = self.create_processor()
await processor.trigger_external_user_uttered(
intent_name, entities, tracker, output_channel,
)
async def handle_text(
self,
text_message: Union[Text, Dict[Text, Any]],
message_preprocessor: Optional[Callable[[Text], Text]] = None,
output_channel: Optional[OutputChannel] = None,
sender_id: Optional[Text] = UserMessage.DEFAULT_SENDER_ID,
) -> Optional[List[Dict[Text, Any]]]:
"""Handle a single message.
If a message preprocessor is passed, the message will be passed to that
function first and the return value is then used as the
input for the dialogue engine.
The return value of this function depends on the ``output_channel``. If
the output channel is not set, set to ``None``, or set
to ``CollectingOutputChannel`` this function will return the messages
the bot wants to respond.
:Example:
>>> from rasa.core.agent import Agent
>>> from rasa.core.interpreter import RasaNLUInterpreter
>>> agent = Agent.load("examples/moodbot/models")
>>> await agent.handle_text("hello")
[u'how can I help you?']
"""
if isinstance(text_message, str):
text_message = {"text": text_message}
msg = UserMessage(text_message.get("text"), output_channel, sender_id)
return await self.handle_message(msg, message_preprocessor)
def toggle_memoization(self, activate: bool) -> None:
"""Toggles the memoization on and off.
If a memoization policy is present in the ensemble, this will toggle
the prediction of that policy. When set to ``False`` the Memoization
policies present in the policy ensemble will not make any predictions.
Hence, the prediction result from the ensemble always needs to come
from a different policy (e.g. ``TEDPolicy``). Useful to test
prediction
capabilities of an ensemble when ignoring memorized turns from the
training data."""
if not self.policy_ensemble:
return
for p in self.policy_ensemble.policies:
# explicitly ignore inheritance (e.g. augmented memoization policy)
if type(p) == MemoizationPolicy:
p.toggle(activate)
def _max_history(self) -> int:
"""Find maximum max_history."""
max_histories = [
policy.featurizer.max_history
for policy in self.policy_ensemble.policies
if hasattr(policy.featurizer, "max_history")
]
return max(max_histories or [0])
def _are_all_featurizers_using_a_max_history(self) -> bool:
"""Check if all featurizers are MaxHistoryTrackerFeaturizer."""
def has_max_history_featurizer(policy):
return policy.featurizer and hasattr(policy.featurizer, "max_history")
for p in self.policy_ensemble.policies:
if p.featurizer and not has_max_history_featurizer(p):
return False
return True
    async def load_data(
        self,
        training_resource: Union[Text, TrainingDataImporter],
        remove_duplicates: bool = True,
        unique_last_num_states: Optional[int] = None,
        augmentation_factor: int = 50,
        tracker_limit: Optional[int] = None,
        use_story_concatenation: bool = True,
        debug_plots: bool = False,
        exclusion_percentage: int = None,
    ) -> List[DialogueStateTracker]:
        """Load training data from a resource.

        All keyword arguments are forwarded to `training.load_data`;
        `unique_last_num_states` is auto-detected from the policies'
        `max_history` when not set.
        """

        max_history = self._max_history()

        if unique_last_num_states is None:
            # for speed up of data generation
            # automatically detect unique_last_num_states
            # if it was not set and
            # if all featurizers are MaxHistoryTrackerFeaturizer
            if self._are_all_featurizers_using_a_max_history():
                unique_last_num_states = max_history
        elif unique_last_num_states < max_history:
            # possibility of data loss
            raise_warning(
                f"unique_last_num_states={unique_last_num_states} but "
                f"maximum max_history={max_history}. "
                f"Possibility of data loss. "
                f"It is recommended to set "
                f"unique_last_num_states to "
                f"at least maximum max_history."
            )

        return await training.load_data(
            training_resource,
            self.domain,
            remove_duplicates,
            unique_last_num_states,
            augmentation_factor,
            tracker_limit,
            use_story_concatenation,
            debug_plots,
            exclusion_percentage=exclusion_percentage,
        )
    def train(
        self, training_trackers: List[DialogueStateTracker], **kwargs: Any
    ) -> None:
        """Train the policies / policy ensemble using dialogue data from file.

        Args:
            training_trackers: trackers to train on
            **kwargs: additional arguments passed to the underlying ML
                trainer (e.g. keras parameters)

        Raises:
            AgentNotReady: if no policy ensemble is set.
            Exception: for the deprecated call patterns checked below.
        """
        if not self.is_core_ready():
            raise AgentNotReady("Can't train without a policy ensemble.")

        # deprecation tests — reject argument styles from older rasa versions
        if kwargs.get("featurizer"):
            raise Exception(
                "Passing `featurizer` "
                "to `agent.train(...)` is not supported anymore. "
                "Pass appropriate featurizer directly "
                "to the policy configuration instead. More info "
                "{}/core/migrations.html".format(LEGACY_DOCS_BASE_URL)
            )
        if (
            kwargs.get("epochs")
            or kwargs.get("max_history")
            or kwargs.get("batch_size")
        ):
            raise Exception(
                "Passing policy configuration parameters "
                "to `agent.train(...)` is not supported "
                "anymore. Specify parameters directly in the "
                "policy configuration instead. More info "
                "{}/core/migrations.html".format(LEGACY_DOCS_BASE_URL)
            )

        if isinstance(training_trackers, str):
            # the user most likely passed in a file name to load training
            # data from
            raise Exception(
                "Passing a file name to `agent.train(...)` is "
                "not supported anymore. Rather load the data with "
                "`data = agent.load_data(file_name)` and pass it "
                "to `agent.train(data)`."
            )

        logger.debug(f"Agent trainer got kwargs: {kwargs}")

        self.policy_ensemble.train(training_trackers, self.domain, **kwargs)
        # training produces a new model, so it gets a new fingerprint
        self._set_fingerprint()
def _set_fingerprint(self, fingerprint: Optional[Text] = None) -> None:
if fingerprint:
self.fingerprint = fingerprint
else:
self.fingerprint = uuid.uuid4().hex
@staticmethod
def _clear_model_directory(model_path: Text) -> None:
"""Remove existing files from model directory.
Only removes files if the directory seems to contain a previously
persisted model. Otherwise does nothing to avoid deleting
`/` by accident."""
if not os.path.exists(model_path):
return
domain_spec_path = os.path.join(model_path, "metadata.json")
# check if there were a model before
if os.path.exists(domain_spec_path):
logger.info(
"Model directory {} exists and contains old "
"model files. All files will be overwritten."
"".format(model_path)
)
shutil.rmtree(model_path)
else:
logger.debug(
"Model directory {} exists, but does not contain "
"all old model files. Some files might be "
"overwritten.".format(model_path)
)
def persist(self, model_path: Text) -> None:
"""Persists this agent into a directory for later loading and usage."""
if not self.is_core_ready():
raise AgentNotReady("Can't persist without a policy ensemble.")
if not model_path.endswith(DEFAULT_CORE_SUBDIRECTORY_NAME):
model_path = os.path.join(model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
self._clear_model_directory(model_path)
self.policy_ensemble.persist(model_path)
self.domain.persist(os.path.join(model_path, DEFAULT_DOMAIN_PATH))
self.domain.persist_specification(model_path)
logger.info("Persisted model to '{}'".format(os.path.abspath(model_path)))
async def visualize(
self,
resource_name: Text,
output_file: Text,
max_history: Optional[int] = None,
nlu_training_data: Optional[Text] = None,
should_merge_nodes: bool = True,
fontsize: int = 12,
) -> None:
from rasa.core.training.visualization import visualize_stories
from rasa.core.training.dsl import StoryFileReader
"""Visualize the loaded training data from the resource."""
# if the user doesn't provide a max history, we will use the
# largest value from any policy
max_history = max_history or self._max_history()
story_steps = await StoryFileReader.read_from_folder(resource_name, self.domain)
await visualize_stories(
story_steps,
self.domain,
output_file,
max_history,
self.interpreter,
nlu_training_data,
should_merge_nodes,
fontsize,
)
def create_processor(
self, preprocessor: Optional[Callable[[Text], Text]] = None
) -> MessageProcessor:
"""Instantiates a processor based on the set state of the agent."""
# Checks that the interpreter and tracker store are set and
# creates a processor
if not self.is_ready():
raise AgentNotReady(
"Agent needs to be prepared before usage. You need to set an "
"interpreter and a tracker store."
)
return MessageProcessor(
self.interpreter,
self.policy_ensemble,
self.domain,
self.tracker_store,
self.nlg,
action_endpoint=self.action_endpoint,
message_preprocessor=preprocessor,
)
@staticmethod
def _create_domain(domain: Union[Domain, Text, None]) -> Domain:
if isinstance(domain, str):
domain = Domain.load(domain)
domain.check_missing_templates()
return domain
elif isinstance(domain, Domain):
return domain
elif domain is None:
return Domain.empty()
else:
raise ValueError(
"Invalid param `domain`. Expected a path to a domain "
"specification or a domain instance. But got "
"type '{}' with value '{}'".format(type(domain), domain)
)
@staticmethod
def create_tracker_store(
store: Optional[TrackerStore], domain: Domain
) -> TrackerStore:
if store is not None:
store.domain = domain
tracker_store = store
else:
tracker_store = InMemoryTrackerStore(domain)
return FailSafeTrackerStore(tracker_store)
@staticmethod
def _create_lock_store(store: Optional[LockStore]) -> LockStore:
if store is not None:
return store
return InMemoryLockStore()
@staticmethod
def _create_ensemble(
policies: Union[List[Policy], PolicyEnsemble, None]
) -> Optional[PolicyEnsemble]:
if policies is None:
return None
if isinstance(policies, list):
return SimplePolicyEnsemble(policies)
elif isinstance(policies, PolicyEnsemble):
return policies
else:
passed_type = type(policies).__name__
raise ValueError(
"Invalid param `policies`. Passed object is "
"of type '{}', but should be policy, an array of "
"policies, or a policy ensemble.".format(passed_type)
)
@staticmethod
def load_local_model(
model_path: Text,
interpreter: Optional[NaturalLanguageInterpreter] = None,
generator: Union[EndpointConfig, NaturalLanguageGenerator] = None,
tracker_store: Optional[TrackerStore] = None,
lock_store: Optional[LockStore] = None,
action_endpoint: Optional[EndpointConfig] = None,
model_server: Optional[EndpointConfig] = None,
remote_storage: Optional[Text] = None,
) -> "Agent":
if os.path.isfile(model_path):
model_archive = model_path
else:
model_archive = get_latest_model(model_path)
if model_archive is None:
raise_warning(f"Could not load local model in '{model_path}'.")
return Agent()
working_directory = tempfile.mkdtemp()
unpacked_model = unpack_model(model_archive, working_directory)
return Agent.load(
unpacked_model,
interpreter=interpreter,
generator=generator,
tracker_store=tracker_store,
lock_store=lock_store,
action_endpoint=action_endpoint,
model_server=model_server,
remote_storage=remote_storage,
path_to_model_archive=model_archive,
)
@staticmethod
def load_from_remote_storage(
remote_storage: Text,
model_name: Text,
interpreter: Optional[NaturalLanguageInterpreter] = None,
generator: Union[EndpointConfig, NaturalLanguageGenerator] = None,
tracker_store: Optional[TrackerStore] = None,
lock_store: Optional[LockStore] = None,
action_endpoint: Optional[EndpointConfig] = None,
model_server: Optional[EndpointConfig] = None,
) -> Optional["Agent"]:
from rasa.nlu.persistor import get_persistor
persistor = get_persistor(remote_storage)
if persistor is not None:
target_path = tempfile.mkdtemp()
persistor.retrieve(model_name, target_path)
return Agent.load(
target_path,
interpreter=interpreter,
generator=generator,
tracker_store=tracker_store,
lock_store=lock_store,
action_endpoint=action_endpoint,
model_server=model_server,
remote_storage=remote_storage,
)
return None
| 36.431183 | 88 | 0.624008 |
ebf981e21e3aa8af4659473e806c4b779f938b17 | 2,572 | py | Python | tests/test_export_convert_to_jpeg.py | garz75/osxphotos | 8cedef447ee8cf8419ae3cbdfc197b2dbab5186a | [
"MIT"
] | null | null | null | tests/test_export_convert_to_jpeg.py | garz75/osxphotos | 8cedef447ee8cf8419ae3cbdfc197b2dbab5186a | [
"MIT"
] | null | null | null | tests/test_export_convert_to_jpeg.py | garz75/osxphotos | 8cedef447ee8cf8419ae3cbdfc197b2dbab5186a | [
"MIT"
] | null | null | null | import os
import pytest
from osxphotos._constants import _UNKNOWN_PERSON
# Conversion tests are opt-in: they only run when OSXPHOTOS_TEST_CONVERT is
# set in the environment (e.g. not on CI runners).
skip_test = "OSXPHOTOS_TEST_CONVERT" not in os.environ
pytestmark = pytest.mark.skipif(
    skip_test, reason="Skip if running on GitHub actions, no GPU."
)
# Path to the test photo library used by the fixture below.
PHOTOS_DB = "tests/Test-10.15.6.photoslibrary"
# UUIDs of a RAW photo and a HEIC photo inside PHOTOS_DB.
UUID_DICT = {
    "raw": "D05A5FE3-15FB-49A1-A15D-AB3DA6F8B068",
    "heic": "7783E8E6-9CAC-40F3-BE22-81FB7051C266",
}
# Expected exported file names after JPEG conversion.
NAMES_DICT = {
    "raw": "DSC03584.jpeg",
    "heic": "IMG_3092.jpeg"
}
# Live photo (HEIC image + paired .mov) -- NOTE(review): this UUID lives in
# the author's personal library, not in PHOTOS_DB (see skipif below).
UUID_LIVE_HEIC = "612CE30B-3D8F-417A-9B14-EC42CBA10ACC"
NAMES_LIVE_HEIC = [
    "IMG_3259.jpeg",
    "IMG_3259.mov"
]
@pytest.fixture(scope="module")
def photosdb():
    # Module-scoped so the library is opened once and shared by all tests.
    import osxphotos
    return osxphotos.PhotosDB(dbfile=PHOTOS_DB)
def test_export_convert_raw_to_jpeg(photosdb):
    """Exporting a RAW photo with convert_to_jpeg yields a .jpeg file."""
    import pathlib
    import tempfile

    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    photo = photosdb.photos(uuid=[UUID_DICT["raw"]])[0]
    results = photo.export2(tempdir.name, convert_to_jpeg=True)

    exported = pathlib.Path(results.exported[0])
    assert exported.is_file()
    assert exported.suffix == ".jpeg"
    assert exported.name == NAMES_DICT["raw"]
def test_export_convert_heic_to_jpeg(photosdb):
    """Exporting a HEIC photo with convert_to_jpeg yields a .jpeg file."""
    import pathlib
    import tempfile

    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    photo = photosdb.photos(uuid=[UUID_DICT["heic"]])[0]
    results = photo.export2(tempdir.name, convert_to_jpeg=True)

    exported = pathlib.Path(results.exported[0])
    assert exported.is_file()
    assert exported.suffix == ".jpeg"
    assert exported.name == NAMES_DICT["heic"]
@pytest.mark.skipif(
    "OSXPHOTOS_TEST_EXPORT" not in os.environ,
    reason="Skip if not running against author's personal library",
)
def test_export_convert_live_heic_to_jpeg():
    """convert_to_jpeg on a live HEIC exports both jpeg and mov (issue #235).

    None of the bundled test libraries contain a live HEIC, so this uses
    the author's personal (default) library and is gated by the skipif.
    """
    import pathlib
    import tempfile

    import osxphotos

    photosdb = osxphotos.PhotosDB()
    tempdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
    photo = photosdb.get_photo(UUID_LIVE_HEIC)
    results = photo.export2(tempdir.name, convert_to_jpeg=True, live_photo=True)

    for name in NAMES_LIVE_HEIC:
        assert f"{tempdir.name}/{name}" in results.exported
    for exported in results.exported:
        assert pathlib.Path(exported).is_file()
1480562a98949cac914cb4867bcd52accffa64df | 17,976 | py | Python | utils/neural_net_testing.py | qarchli/Multi-Layer-Neural-Network | f8f14f90ec90644c89f0e291c9a41e4d2fec7850 | [
"MIT"
] | null | null | null | utils/neural_net_testing.py | qarchli/Multi-Layer-Neural-Network | f8f14f90ec90644c89f0e291c9a41e4d2fec7850 | [
"MIT"
] | null | null | null | utils/neural_net_testing.py | qarchli/Multi-Layer-Neural-Network | f8f14f90ec90644c89f0e291c9a41e4d2fec7850 | [
"MIT"
] | 1 | 2021-11-14T16:37:53.000Z | 2021-11-14T16:37:53.000Z | import numpy as np
import activation_functions
import performance
import init_methods
import debug_utils
class MLNN:
    """Fully-connected multi-layer neural network for binary classification.

    Trained with batch gradient descent; supports L2 regularization
    (``lambd``), inverted dropout (``keep_prob``) and several weight
    initialization schemes. Forward/backward passes print per-layer
    debug information (this is the "testing" variant of the module).
    """
    def __init__(self, layers_dims, init_method, activations, lambd, keep_prob, learning_rate, num_iterations):
        """
        Initializes a Multi-Layer Neural Network.
        ---
        Arguments:
            layers_dims: list containing the dimension of each layer of the
        Neural Net.
            init_method: flag refering to the init method to use e.g.
        ('xavier', 'he')
            activations: list containing the activation functions to be used
        in the hidden layers and the output layer respectively. e.g. ('relu', 'sigmoid').
            lambd: L2 Regularization parameter.
            keep_prob: probability of keeping a neuron in dropout
        regularization.
            learning_rate: learning rate of the gradient descent update rule
            num_iterations: number of iterations of the optimization loop
        Returns:
            MLNN: instance of MLNN.
        """
        self.__layers_dims = layers_dims
        self.__init_method = init_method
        self.__activations = activations
        self.__lambd = lambd
        self.__keep_prob = keep_prob
        self.__learning_rate = learning_rate
        self.__num_iterations = num_iterations
        # Initializing the parameters (weights W and biases b per layer)
        self.__parameters = self.__initialize_parameters(init_method)
        self.__costs = None
        # Tracks which layer the forward/backward pass is currently in;
        # incremented on forward, decremented on backward.
        self.__layer_tracker = 0
    # ========================================================================
    # Initialization
    # ========================================================================
    def __initialize_parameters(self, init_method):
        """
        Randomly initialize the W and initialize b to zeros for every layer.
        ---
        Argument:
            layers_dims: list containing the dimension of each layer of the
        NN.
            init_method: flag refering to the init method to use e.g.
        ('xavier', 'he'); anything else falls back to plain random init.
        Returns:
            initial_parameters: python dictionnary containing the initialized
        parameters "W1", "b1", ..., "WL", "bL".
        """
        initial_parameters = {}
        if init_method == 'xavier':
            initial_parameters = init_methods.initialize_parameters_xavier(
                self.__layers_dims)
        elif init_method == 'he':
            initial_parameters = init_methods.initialize_parameters_he(
                self.__layers_dims)
        else:
            initial_parameters = init_methods.initialize_parameters_random(
                self.__layers_dims)
        return initial_parameters
    # ========================================================================
    # Forward Propagation
    # ========================================================================
    def __one_layer_forward_propagation(self, A_prev, W_current, b_current,
                                        activation_current):
        """
        Performs forward propagation only for one layer, on all the training examples.
        ---
        Arguments:
            A_prev: activation from the previous layer. Numpy array of
        shape (# units of the previous layer, number of examples)
            W_current: weights. Numpy array of shape (# units of current
        layer, # units of the previous layer)
            b_current: bias. array of shape (# units of current layer, 1)
            activation_current: activation function of the current layer as
        string e.g. ('relu', 'sigmoid', 'tanh')
        Returns:
            A_current: the output of the activation function of the current
        layer. numpy array of size (# units of current layer, 1)
            cache_current: cache from the current layer. (values to be used
        later in the backprop step.)
        """
        self.__layer_tracker += 1
        print('I\'m in layer {} moving forward'.format(self.__layer_tracker))
        # Map activation name -> function
        dispatcher = {
            'sigmoid': activation_functions.sigmoid,
            'relu': activation_functions.relu,
            'tanh': activation_functions.tanh
        }
        activation = dispatcher[activation_current]
        Z_current = np.dot(W_current, A_prev) + b_current
        A_current = activation(Z_current)
        # Forward Prop Inverted Dropout: never drop units in the output
        # layer (mask of ones); hidden layers get a random keep mask and
        # are rescaled by keep_prob so activations keep the same scale.
        if self.__layer_tracker == (len(self.__layers_dims) - 1):
            D_current = np.ones(A_current.shape)
        else:
            D_current = np.random.rand(*A_current.shape)
            D_current = (D_current < self.__keep_prob)
            A_current *= D_current
            A_current /= self.__keep_prob
        # Cache everything backprop needs (including the dropout mask)
        cache_current = (A_prev, Z_current, W_current, D_current)
        print('A_prev.shape = ', A_prev.shape)
        print('A_current.shape = ', A_current.shape)
        print()
        return (A_current, cache_current)
    def __deep_forward_propagation(self, X):
        """
        Performs forward propagation for every layer, on all the training examples.
        ---
        Arguments:
            X: input matrix. Numpy array of shape (input size, # of examples)
        Returns:
            AL: (y_hat) the output of the last layer L.
            deep_forward_caches: caches from every layer during the forward prop.
        """
        deep_forward_caches = []
        L = int(len(self.__parameters) / 2)  # total number of layers
        m = X.shape[1]  # number of training examples
        A_current = X  # A0: initial activation
        # Hidden layers use activations[0]
        for l in range(1, L):
            A_prev = A_current
            W_current = self.__parameters['W' + str(l)]
            b_current = self.__parameters['b' + str(l)]
            A_current, cache_current = self.__one_layer_forward_propagation(
                A_prev, W_current, b_current, self.__activations[0])
            deep_forward_caches.append(cache_current)
        # Activation of the last layer (L) using activations[1] (sigmoid)
        A_prev = A_current
        WL = self.__parameters['W' + str(L)]
        bL = self.__parameters['b' + str(L)]
        AL, cacheL = self.__one_layer_forward_propagation(
            A_prev, WL, bL, self.__activations[1])
        deep_forward_caches.append(cacheL)
        assert (AL.shape == (1, m))
        return (AL, deep_forward_caches)
    # ========================================================================
    # Back Propagation
    # ========================================================================
    def __one_layer_back_propagation(self, dA_current, cache_current,
                                     activation_current):
        """
        Performs back propagation only for one layer, on all the training examples.
        ---
        Arguments:
            dA_current: gradient of the cost w.r.t. this layer's activation.
            cache_current: (A_prev, Z_current, W_current, D_current) tuple
        stored during the forward pass for this layer.
            activation_current: activation name used in the forward pass.
        Returns:
            (dA_prev, dW_current, db_current): gradients for the previous
        layer's activation and this layer's parameters.
        """
        print('I\'m in layer {} moving backward.'.format(self.__layer_tracker))
        n, m = dA_current.shape
        A_prev, Z_current, W_current, D_current = cache_current
        # Back Prop Inverted Dropout: apply the same mask / scaling that was
        # used in the forward pass (skipped for the output layer).
        if self.__layer_tracker != (len(self.__layers_dims) - 1):
            dA_current *= D_current
            dA_current /= self.__keep_prob
        # Map activation name -> derivative function
        dispatcher = {
            'sigmoid': activation_functions.sigmoid_p,
            'relu': activation_functions.relu_p,
            'tanh': activation_functions.tanh_p
        }
        activation_p = dispatcher[activation_current]
        dZ_current = dA_current * activation_p(Z_current)
        # dW includes the L2 regularization term (lambd / m) * W
        dW_current = 1. / m * \
            np.dot(dZ_current, A_prev.T) + self.__lambd / m * W_current
        db_current = 1. / m * np.sum(dZ_current, axis=1, keepdims=True)
        dA_prev = np.dot(W_current.T, dZ_current)
        print('dA_prev.shape = ', dA_prev.shape)
        print('dA_current.shape = ', dA_current.shape)
        print()
        self.__layer_tracker -= 1
        return (dA_prev, dW_current, db_current)
    def __deep_back_propagation(self, AL, Y, deep_forward_caches):
        """
        Performs backward propagation for every layer, on all the training examples.
        ---
        Arguments:
            AL: probability vector, output of the deep forward propagation (deep_forward_propagation()).
            Y: true "label" vector.
            deep_forward_caches: list of caches from the deep_forward_propagation containing:
                -"relu" activation caches (deep_caches[l], for l in range(L-1) i.e l = 0...L-2)
                -"sigmoid" activation cache (deep_caches[L-1])
        Returns:
            grads -- A dictionary with the gradients
                grads["dW" + str(l)] = ...
                grads["db" + str(l)] = ...
        """
        L = len(deep_forward_caches)  # total number of layers
        n, m = AL.shape  # number of training examples
        Y = Y.reshape(AL.shape)
        grads = {}
        def write_grads(dA, dW, db, layer):
            """
            Writes gradient values for one layer into the grads dictionary.
            (dA is accepted but intentionally not stored.)
            """
            # grads['dA' + str(layer)] = dA
            grads['dW' + str(layer)] = dW
            grads['db' + str(layer)] = db
        # Performing backprop on the last layer: derivative of the binary
        # cross-entropy cost w.r.t. AL initializes the chain.
        dA_current = -Y / AL + (1 - Y) / (
            1 - AL)  # = dAL (backprop initialization)
        cache_current = deep_forward_caches[L - 1]
        dA_prev, dW_current, db_current = self.__one_layer_back_propagation(
            dA_current, cache_current, self.__activations[1])
        write_grads(dA_current, dW_current, db_current, L)
        # Walk the hidden layers from L-1 down to 1
        for l in range(L - 2, -1, -1):
            # Updating the current cache and the current derivative
            cache_current = deep_forward_caches[l]
            dA_current = dA_prev
            dA_prev, dW_current, db_current = self.__one_layer_back_propagation(
                dA_current, cache_current, self.__activations[0])
            write_grads(dA_current, dW_current, db_current, l + 1)
        # Reset layer tracker
        # self.__layer_tracker = 0
        return (grads)
    # ========================================================================
    # Weights update
    # ========================================================================
    def __update_parameters(self, grads):
        """
        Update parameters using gradient descent.
        --
        Arguments:
            grads: dictionary containing all the gradients, output of deep_backward_propagation()
        Returns:
            None; self.__parameters is replaced with the updated values.

        NOTE(review): this pairs parameters with gradients by zipping the
        lexicographically sorted key lists ('W*'/'b*' vs 'dW*'/'db*'), which
        happens to keep the same relative order; indexing grads['d' + key]
        directly would be more robust -- confirm before changing.
        """
        self.__parameters = {
            param_key:
            self.__parameters[param_key] -
            self.__learning_rate * grads[grad_key]
            for param_key, grad_key in zip(
                sorted(self.__parameters.keys()), sorted(grads.keys()))
        }
    # ========================================================================
    # Training phase
    # ========================================================================
    def train(self, X, Y):
        """
        Trains the network with batch gradient descent.
        Arguments:
            X: input data, of shape (n_x, number of examples)
            Y: true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
        Returns:
            self, with learnt parameters and the per-iteration costs stored.
        """
        # Array containing the costs
        costs = []
        # Parameters were already initialized in __init__
        # parameters = self.__initialize_parameters()
        for iteration in range(self.__num_iterations):
            print('\tIteration {}'.format(iteration))
            # Performing forward prop
            AL, deep_forward_caches = self.__deep_forward_propagation(
                X)
            # print('iteration = ', iteration)
            # print('AL ', AL)
            # Computing the cost including L2 Regularization
            cost = performance.compute_cost(
                Y, AL, self.__parameters, self.__lambd)
            costs.append(cost)
            # Performing backprop
            grads = self.__deep_back_propagation(AL, Y, deep_forward_caches)
            # Updating the parameters
            self.__update_parameters(grads)
        # Adding the costs to the MLNN params
        self.__costs = costs
        return (self)
    # ========================================================================
    # Prediction making
    # ========================================================================
    def predict(self, X, threshold=0.5):
        """
        Using the learnt parameters to predict a label for each example in X.
        --
        Arguments:
            X: input data as a numpy array of size
        (# of features, # of examples)
            threshold: probability above which an example is labeled 1.
        Returns:
            predictions: vector of the predicted labels corresponding to X.
        """
        AL, _ = self.__deep_forward_propagation(X)  # Vector of probabilities
        predictions = (AL >= threshold)
        return (predictions.astype(int))
    # ========================================================================
    # Getting the params and costs
    # ========================================================================
    def get_params(self):
        """
        Returns a dictionnary of the NN hyperparameters (plus learnt
        weights, and costs if the network has been trained).
        """
        params = {
            'layers_dims': self.__layers_dims,
            'init_method': self.__init_method,
            'activations': self.__activations,
            'lambda': self.__lambd,
            'learning_rate': self.__learning_rate,
            'num_iterations': self.__num_iterations,
            'weights': self.__parameters,
        }
        if self.__costs is not None:
            params['costs'] = self.__costs
        return (params)
    def set_params(self, params):
        # Replaces the learnt weights wholesale (expects the same dict
        # layout as self.__parameters: "W1", "b1", ..., "WL", "bL").
        self.__parameters = params
    # ========================================================================
    # Gradient checking to debug backpropagation step
    # ========================================================================
    def gradient_checking(self, X, Y, epsilon=1e-07):
        # Numerically verifies backprop by comparing analytic gradients to a
        # centered finite-difference approximation. Temporarily overwrites
        # self.__parameters during the check and restores them at the end.
        temp = self.__parameters  # save initial model parameters
        def grad_approx(Y, theta, sizes, epsilon=1e-07):
            """
            Computes a centered finite-difference approximation of the
            gradient, one parameter component at a time.
            """
            dtheta_approx = []
            J_plus = []
            J_minus = []
            dim = len(theta)
            for i in range(dim):
                thetaplus = np.copy(theta)
                thetaminus = np.copy(theta)
                thetaplus[i] += epsilon
                thetaminus[i] -= epsilon
                thetaplus_dict = debug_utils.vector_to_dictionnary(
                    thetaplus, self.__layers_dims, sizes)
                thetaminus_dict = debug_utils.vector_to_dictionnary(
                    thetaminus, self.__layers_dims, sizes)
                self.__parameters = thetaplus_dict
                AL, _ = self.__deep_forward_propagation(
                    X)  # Vector of probabilities
                J_plus = performance.compute_cost(
                    Y, AL, thetaplus_dict, self.__lambd)
                self.__parameters = thetaminus_dict
                AL, _ = self.__deep_forward_propagation(
                    X)  # Vector of probabilities
                J_minus = performance.compute_cost(
                    Y, AL, thetaminus_dict, self.__lambd)
                # Centered difference: (J(theta+eps) - J(theta-eps)) / 2eps
                dtheta_approx.append(
                    (J_plus - J_minus) / (2 * epsilon))
            return np.array(dtheta_approx).reshape((dim, 1))
        def compare_gradients(dtheta_backprop, dtheta_approx, epsilon=1e-07):
            # Relative difference between analytic and numeric gradients
            numerator = np.linalg.norm(dtheta_backprop - dtheta_approx)
            denominator = np.linalg.norm(dtheta_approx) + \
                np.linalg.norm(dtheta_backprop)
            difference = numerator / denominator
            if difference > 2 * epsilon:
                # ANSI escape codes: red for failure, green for success
                CRED = '\033[91m'
                CEND = '\033[0m'
                print(CRED +
                      "There is a mistake in the backward propagation! difference = " +
                      str(difference) + ', epsilon = ' + str(epsilon) + CEND)
            else:
                print("\033[92m" +
                      "Your backward propagation works perfectly fine! difference = " +
                      str(difference) + ', epsilon = ' + str(epsilon) + "\033[0m")
        # Getting the grads of backprop
        # Performing forward prop
        AL, deep_forward_caches = self.__deep_forward_propagation(
            X)
        # Performing backprop to get the grads
        grads = self.__deep_back_propagation(AL, Y, deep_forward_caches)
        # Gradient checking: flatten parameter/gradient dicts into vectors
        theta, sizes = debug_utils.dictionnary_to_vector(
            self.__parameters)
        dtheta_backprop, sizes = debug_utils.dictionnary_to_vector(
            grads)
        dtheta_approx = grad_approx(Y, theta, sizes)
        compare_gradients(dtheta_backprop, dtheta_approx)
        self.__parameters = temp
| 38.410256 | 127 | 0.544949 |
fd4179ec25a4cec13f729c1f71ef46e207b2dd2c | 2,817 | py | Python | tests/test_modules/test_pmac/test_rawmotorcspart.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_pmac/test_rawmotorcspart.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_pmac/test_rawmotorcspart.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | import unittest
from mock import patch
from malcolm.core import Process, AlarmSeverity
from malcolm.modules.builtin.controllers import StatefulController
from malcolm.modules.pmac.parts import RawMotorCSPart
class castr(str):
    """Test stand-in for a catools string value: a plain ``str`` carrying
    the channel-access metadata attributes the part under test reads."""
    ok = True
    severity = 0
class caenum(int):
    """Test stand-in for a catools enum value: an ``int`` index plus the
    list of enum strings and channel-access metadata attributes."""
    ok = True
    severity = 0
    enums = ["ANYTHING", "BRICK1CS1", "BRICK1CS2"]
@patch("malcolm.modules.ca.util.catools")
class TestRawMotorCSPart(unittest.TestCase):
    """Tests for RawMotorCSPart with the catools channel-access layer
    mocked out; each test method receives the mock as ``catools``."""
    def setUp(self):
        # Build a process with a stateful controller hosting the part under
        # test, and grab a block view for assertions.
        self.process = Process("proc")
        self.o = RawMotorCSPart("cs", "PV:PRE")
        c = StatefulController("mri")
        c.add_part(self.o)
        self.process.add_controller(c)
        self.b = self.process.block_view("mri")
        self.addCleanup(self.process.stop)
    def do_init(self, catools):
        # Prime caget with initial values: port enum index 2 / axis "I"
        # (setpoints) and port index 1 / axis "A" (readbacks), then start.
        catools.caget.side_effect = [[
            caenum(2), castr("I"),
            caenum(1), castr("A")
        ]]
        self.process.start()
    def test_init(self, catools):
        self.do_init(catools)
        # The part should fetch both setpoint and readback PVs once.
        catools.caget.assert_called_once_with(
            ["PV:PRE:CsPort", "PV:PRE:CsAxis", "PV:PRE:CsPort_RBV",
             "PV:PRE:CsAxis_RBV"], format=catools.FORMAT_CTRL)
        assert list(self.b) == [
            'meta', 'health', 'state', 'disable', 'reset', 'cs']
        # cs value is "<port enum string>,<axis>" from the readbacks.
        assert self.b.cs.value == "BRICK1CS1,A"
    def test_update_axis(self, catools):
        self.do_init(catools)
        # Index 1 carries axis updates.
        update = castr("I")
        self.o._update_value(update, 1)
        assert self.b.cs.value == "BRICK1CS1,I"
    def test_update_port(self, catools):
        self.do_init(catools)
        # Index 0 carries port updates.
        update = caenum(2)
        self.o._update_value(update, 0)
        assert self.b.cs.value == "BRICK1CS2,A"
    def test_update_disconnect(self, catools):
        self.do_init(catools)
        # Port enum 0 means "no CS assigned" -> empty value.
        update = caenum(0)
        self.o._update_value(update, 0)
        assert self.b.cs.value == ""
    def test_update_bad(self, catools):
        self.do_init(catools)
        # A not-ok update clears the value and raises an INVALID alarm.
        update = castr("")
        update.ok = False
        self.o._update_value(update, 1)
        assert self.b.cs.value == ""
        assert self.b.cs.alarm.severity == AlarmSeverity.INVALID_ALARM
    def test_caput(self, catools):
        self.do_init(catools)
        # After writing, the part re-reads the PVs; prime that readback.
        catools.caget.side_effect = [[caenum(2), castr("Y")]]
        self.o.caput("BRICK1CS2,X")
        catools.caput.assert_called_once_with(
            ['PV:PRE:CsPort', 'PV:PRE:CsAxis'], (2, 'X'), wait=True
        )
        assert self.b.cs.value == "BRICK1CS2,Y"
    def test_caput_none(self, catools):
        self.do_init(catools)
        catools.caget.side_effect = [[caenum(0), castr("")]]
        # Empty string means "unassign": port 0, empty axis.
        self.o.caput("")
        catools.caput.assert_called_once_with(
            ['PV:PRE:CsPort', 'PV:PRE:CsAxis'], (0, ''), wait=True
        )
        assert self.b.cs.value == ""
| 31.3 | 70 | 0.604189 |
e8b1d5ffd3cecab7f1dc3b8ff06c8af55ea4639b | 382 | py | Python | homedisplay/info_ext_pages/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2016-11-28T04:35:06.000Z | 2016-11-28T04:35:06.000Z | homedisplay/info_ext_pages/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 160 | 2015-01-01T20:59:29.000Z | 2016-04-25T13:36:52.000Z | homedisplay/info_ext_pages/models.py | ojarva/home-info-display | 873d022308732baff94d0dc2381cf9dc7dce23b7 | [
"BSD-3-Clause"
] | 1 | 2015-02-25T21:24:01.000Z | 2015-02-25T21:24:01.000Z | from django.db import models
class ExtPage(models.Model):
    """External page, identified by its URL, to show on the display."""
    # Creation time; set automatically on first save.
    timestamp = models.DateTimeField(auto_now_add=True)
    # Address of the external page.
    url = models.CharField(max_length=1024)
    def __unicode__(self):
        return u"%s" % self.url
    class Meta:
        # Verbose names are in Finnish ("Osoite" = address).
        ordering = ("timestamp", )
        verbose_name = "Osoite"
        verbose_name_plural = "Osoitteet"
        get_latest_by = "timestamp"
| 23.875 | 55 | 0.65445 |
26c34d0677c8017cc322f8c6e46efd6ef198b6ef | 1,518 | py | Python | src/shinymud/models/script.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 35 | 2015-01-06T12:01:38.000Z | 2022-01-22T13:57:26.000Z | src/shinymud/models/script.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 1 | 2021-06-24T13:21:16.000Z | 2021-07-06T18:46:06.000Z | src/shinymud/models/script.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 8 | 2015-02-04T16:30:52.000Z | 2021-02-03T15:02:38.000Z | from shinymud.modes.text_edit_mode import TextEditMode
from shinymud.models import Model, Column, model_list
from shinymud.models.shiny_types import *
import re
class Script(Model):
    """A model that represents an in-game script object.

    The ``build_set_*`` methods are invoked by the in-game build commands
    and return the message to show the builder.
    """
    db_table_name = 'script'
    # Persisted columns, extending the base Model columns.
    db_columns = Model.db_columns + [
        Column('area', type="INTEGER", read=read_area, write=write_area),
        Column('name', default='New Script'),
        Column('body', default=''),
        Column('id')
    ]
    def __str__(self):
        """Return a formatted, human-readable summary of this script."""
        string = (' Script %s in Area %s ' % (self.id, self.area.name)
                  ).center(50, '-') + '\n'
        body = '\n    '.join([line for line in self.body.split('\n')])
        if not body:
            body = 'Script is empty.'
        string += "Name: %s\nBody:\n    %s" % (self.name, body)
        string += '\n' + ('-' * 50)
        return string
    def build_set_name(self, name, player=None):
        """Set the name of this script item."""
        if not name:
            # Fixed duplicated word ("the the") in the user-facing prompt.
            return 'Set the name of the script to what?'
        self.name = name
        self.save()
        return 'Script %s\'s name has been set to "%s".' % (self.id, self.name)
    def build_set_body(self, body, player=None):
        """Open the player's text editor on this script's body."""
        player.last_mode = player.mode
        player.mode = TextEditMode(player, self, 'body', self.body, 'script')
        return 'ENTERING TextEditMode: type "@help" for help.\n'
model_list.register(Script) | 36.142857 | 79 | 0.582345 |
df5220252fb45fbdb498a7804ea26cee7b89f833 | 21,885 | py | Python | nilearn/datasets/struct.py | ariekahn/nilearn | baa77b18ecee7c4507579214af59d715cc9292f9 | [
"BSD-2-Clause"
] | 1 | 2020-04-01T21:56:17.000Z | 2020-04-01T21:56:17.000Z | nilearn/datasets/struct.py | ariekahn/nilearn | baa77b18ecee7c4507579214af59d715cc9292f9 | [
"BSD-2-Clause"
] | 1 | 2019-04-17T15:23:42.000Z | 2019-04-17T19:59:16.000Z | nilearn/datasets/struct.py | ariekahn/nilearn | baa77b18ecee7c4507579214af59d715cc9292f9 | [
"BSD-2-Clause"
] | 1 | 2017-03-06T05:14:46.000Z | 2017-03-06T05:14:46.000Z | """
Downloading NeuroImaging datasets: structural datasets
"""
import warnings
import os
import numpy as np
from scipy import ndimage
from sklearn.utils import Bunch
from .utils import (_get_dataset_dir, _fetch_files,
_get_dataset_descr, _uncompress_file)
from .._utils import check_niimg, niimg
from ..image import new_img_like, get_data
_package_directory = os.path.dirname(os.path.abspath(__file__))
# Useful for the very simple examples
MNI152_FILE_PATH = os.path.join(_package_directory, "data",
"avg152T1_brain.nii.gz")
FSAVERAGE5_PATH = os.path.join(_package_directory, "data", "fsaverage5")
def fetch_icbm152_2009(data_dir=None, url=None, resume=True, verbose=1):
    """Download and load the ICBM152 template (dated 2009).

    Parameters
    ----------
    data_dir: string, optional
        Path of the data directory. Used to force data storage in a non-
        standard location. Default: None (meaning: default)

    url: string, optional
        Download URL of the dataset. Overwrite the default URL.

    resume: bool, optional
        If True, try resuming a partially downloaded file.

    verbose: int, optional
        Verbosity level.

    Returns
    -------
    data: sklearn.datasets.base.Bunch
        dictionary-like object, interest keys are:
        "t1", "t2", "t2_relax", "pd": anatomical images obtained with the
        given modality (resp. T1, T2, T2 relaxometry and proton
        density weighted). Values are file paths.
        "gm", "wm", "csf": segmented images, giving resp. gray matter,
        white matter and cerebrospinal fluid. Values are file paths.
        "eye_mask", "face_mask", "mask": use these images to mask out
        parts of mri images. Values are file paths.

    Notes
    -----
    For more information about this dataset's structure:
    http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009

    The original download URL is
    http://www.bic.mni.mcgill.ca/~vfonov/icbm/2009/mni_icbm152_nlin_sym_09a_nifti.zip
    """
    if url is None:
        # The URL can be retrieved from the nilearn account on OSF (Open
        # Science Framework), https://osf.io/4r3jt/quickfiles/
        # Clicking on the "share" button gives the root of the URL.
        url = "https://osf.io/7pj92/download"
    opts = {'uncompress': True}

    # Bunch keys, in the same order as the file basenames below.
    keys = ("csf", "gm", "wm",
            "pd", "t1", "t2", "t2_relax",
            "eye_mask", "face_mask", "mask")
    basenames = (
        "mni_icbm152_csf_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_gm_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_wm_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_pd_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_t1_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_t2_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_t2_relx_tal_nlin_sym_09a.nii.gz",
        "mni_icbm152_t1_tal_nlin_sym_09a_eye_mask.nii.gz",
        "mni_icbm152_t1_tal_nlin_sym_09a_face_mask.nii.gz",
        "mni_icbm152_t1_tal_nlin_sym_09a_mask.nii.gz",
    )
    filenames = [(os.path.join("mni_icbm152_nlin_sym_09a", basename),
                  url, opts)
                 for basename in basenames]

    dataset_name = 'icbm152_2009'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    sub_files = _fetch_files(data_dir, filenames, resume=resume,
                             verbose=verbose)

    fdescr = _get_dataset_descr(dataset_name)

    params = {'description': fdescr}
    params.update(zip(keys, sub_files))
    return Bunch(**params)
def load_mni152_template():
    """Load the skull-stripped 2mm MNI152 T1 template distributed with FSL.

    Returns
    -------
    mni152_template: Niimg-like object
        The template image, loaded from the file path bundled with nilearn
        (``MNI152_FILE_PATH``).

    References
    ----------
    VS Fonov, AC Evans, K Botteron, CR Almli, RC McKinstry, DL Collins and
    BDCG, Unbiased average age-appropriate atlases for pediatric studies,
    NeuroImage, Volume 54, Issue 1, January 2011, ISSN 1053-8119, DOI:
    10.1016/j.neuroimage.2010.07.033

    VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins, Unbiased
    nonlinear average age-appropriate brain templates from birth to adulthood,
    NeuroImage, Volume 47, Supplement 1, July 2009, Page S102 Organization for
    Human Brain Mapping 2009 Annual Meeting, DOI: 10.1016/S1053-8119(09)70884-5
    """
    # check_niimg validates the bundled file and returns it as an image object.
    template_img = check_niimg(MNI152_FILE_PATH)
    return template_img
def load_mni152_brain_mask():
    """Load a binary brain mask derived from the MNI152 T1 template.

    .. versionadded:: 0.2.5

    Returns
    -------
    mask_img: Nifti-like mask image corresponding to grey and white matter.

    References
    ----------
    Refer to load_mni152_template function for more information about the
    MNI152 T1 template.

    See Also
    --------
    nilearn.datasets.load_mni152_template : for details about version of the
        MNI152 T1 template and related.
    """
    template = load_mni152_template()
    # Every strictly positive voxel of the skull-stripped template is brain.
    brain_voxels = (get_data(template) > 0).astype(int)
    return new_img_like(template, brain_voxels)
def fetch_icbm152_brain_gm_mask(data_dir=None, threshold=0.2, resume=True,
                                verbose=1):
    """Download the ICBM152 template, then binarize its grey-matter map.

    .. versionadded:: 0.2.5

    Parameters
    ----------
    data_dir: str, optional
        Path of the data directory. Used to force storage in a specified
        location. Defaults to None.
    threshold: float, optional
        Grey-matter probability above which a voxel is kept in the mask.
        Defaults to 0.2 (one fifth) of values.
    resume: bool, optional
        If True, try resuming partially downloaded data. Defaults to True.
    verbose: int, optional
        verbosity level (0 means no message).

    Returns
    -------
    gm_mask_img: Nifti image
        Binary grey-matter mask computed from the ICBM152 template.

    Notes
    -----
    The grey-matter probability template is thresholded (default .2, i.e.
    one fifth of the values) and then compacted with a binary closing
    operation. It is advised to check the resulting mask against your own
    data processing.

    See Also
    --------
    nilearn.datasets.fetch_icbm152_2009: for details regarding the ICBM152
        template.
    nilearn.datasets.load_mni152_template: for details about version of MNI152
        template and related.
    """
    # Fetch the full ICBM152 2009 release; only the 'gm' map is needed here.
    icbm_data = fetch_icbm152_2009(data_dir=data_dir, resume=resume,
                                   verbose=verbose)
    gm_template = check_niimg(icbm_data['gm'])
    probability_map = niimg._safe_get_data(gm_template)
    # Keep voxels above the probability threshold, then close small holes
    # so that the mask is more compact.
    binary_mask = ndimage.binary_closing(probability_map > threshold,
                                         iterations=2)
    return new_img_like(gm_template, binary_mask)
def fetch_oasis_vbm(n_subjects=None, dartel_version=True, data_dir=None,
                    url=None, resume=True, verbose=1):
    """Download and load Oasis "cross-sectional MRI" dataset (416 subjects).

    Parameters
    ----------
    n_subjects: int, optional
        The number of subjects to load. If None is given, all the
        subjects are used.
    dartel_version: boolean,
        Whether or not to use data normalized with DARTEL instead of standard
        SPM8 normalization.
    data_dir: string, optional
        Path of the data directory. Used to force data storage in a specified
        location. Default: None
    url: string, optional
        Override download URL. Used for test only (or if you setup a mirror of
        the data).
    resume: bool, optional
        If true, try resuming download if possible
    verbose: int, optional
        verbosity level (0 means no message).

    Returns
    -------
    data: Bunch
        Dictionary-like object, the interest attributes are :
        - 'gray_matter_maps': string list
          Paths to nifti gray matter density probability maps
        - 'white_matter_maps' string list
          Paths to nifti white matter density probability maps
        - 'ext_vars': np.recarray
          Data from the .csv file with information about selected subjects
        - 'data_usage_agreement': string
          Path to the .txt file containing the data usage agreement.

    References
    ----------
    * http://www.oasis-brains.org/
    * Open Access Series of Imaging Studies (OASIS): Cross-sectional MRI
      Data in Young, Middle Aged, Nondemented, and Demented Older Adults.
      Marcus, D. S and al., 2007, Journal of Cognitive Neuroscience.

    Notes
    -----
    In the DARTEL version, original Oasis data have been preprocessed
    with the following steps:

    1. Dimension swapping (technically required for subsequent steps)
    2. Brain Extraction
    3. Segmentation with SPM8
    4. Normalization using DARTEL algorithm
    5. Modulation
    6. Replacement of NaN values with 0 in gray/white matter density maps.
    7. Resampling to reduce shape and make it correspond to the shape of
       the non-DARTEL data (fetched with dartel_version=False).
    8. Replacement of values < 1e-4 with zeros to reduce the file size.

    In the non-DARTEL version, the following steps have been performed instead:

    1. Dimension swapping (technically required for subsequent steps)
    2. Brain Extraction
    3. Segmentation and normalization to a template with SPM8
    4. Modulation
    5. Replacement of NaN values with 0 in gray/white matter density maps.

    An archive containing the gray and white matter density probability maps
    for the 416 available subjects is provided. Gross outliers are removed and
    filtered by this data fetcher (DARTEL: 13 outliers; non-DARTEL: 1 outlier)
    Externals variates (age, gender, estimated intracranial volume,
    years of education, socioeconomic status, dementia score) are provided
    in a CSV file that is a copy of the original Oasis CSV file. The current
    downloader loads the CSV file and keeps only the lines corresponding to
    the subjects that are actually demanded.

    The Open Access Structural Imaging Series (OASIS) is a project
    dedicated to making brain imaging data openly available to the public.
    Using data available through the OASIS project requires agreeing with
    the Data Usage Agreement that can be found at
    http://www.oasis-brains.org/app/template/UsageAgreement.vm
    """
    # check number of subjects; the per-version caps below (403 / 415) equal
    # 416 total subjects minus the outliers removed further down.
    if n_subjects is None:
        n_subjects = 403 if dartel_version else 415
    if dartel_version:  # DARTEL version has 13 identified outliers
        if n_subjects > 403:
            warnings.warn('Only 403 subjects are available in the '
                          'DARTEL-normalized version of the dataset. '
                          'All of them will be used instead of the wanted %d'
                          % n_subjects)
            n_subjects = 403
    else:  # all subjects except one are available with non-DARTEL version
        if n_subjects > 415:
            warnings.warn('Only 415 subjects are available in the '
                          'non-DARTEL-normalized version of the dataset. '
                          'All of them will be used instead of the wanted %d'
                          % n_subjects)
            n_subjects = 415
    if n_subjects < 1:
        raise ValueError("Incorrect number of subjects (%d)" % n_subjects)

    # pick the archive corresponding to preprocessings type
    if url is None:
        if dartel_version:
            url_images = ('https://www.nitrc.org/frs/download.php/'
                          '6364/archive_dartel.tgz?i_agree=1&download_now=1')
        else:
            url_images = ('https://www.nitrc.org/frs/download.php/'
                          '6359/archive.tgz?i_agree=1&download_now=1')
        # covariates and license are in separate files on NITRC
        url_csv = ('https://www.nitrc.org/frs/download.php/'
                   '6348/oasis_cross-sectional.csv?i_agree=1&download_now=1')
        url_dua = ('https://www.nitrc.org/frs/download.php/'
                   '6349/data_usage_agreement.txt?i_agree=1&download_now=1')
    else:  # local URL used in tests
        url_csv = url + "/oasis_cross-sectional.csv"
        url_dua = url + "/data_usage_agreement.txt"
        if dartel_version:
            url_images = url + "/archive_dartel.tgz"
        else:
            url_images = url + "/archive.tgz"
    opts = {'uncompress': True}

    # missing subjects create shifts in subjects ids; subject ids run from
    # 1 to 456 on disk, so the lists below enumerate the absent ones.
    missing_subjects = [8, 24, 36, 48, 89, 93, 100, 118, 128, 149, 154,
                        171, 172, 175, 187, 194, 196, 215, 219, 225, 242,
                        245, 248, 251, 252, 257, 276, 297, 306, 320, 324,
                        334, 347, 360, 364, 391, 393, 412, 414, 427, 436]

    if dartel_version:
        # DARTEL produces outliers that are hidden by nilearn API
        removed_outliers = [27, 57, 66, 83, 122, 157, 222, 269, 282, 287,
                            309, 428]
        missing_subjects = sorted(missing_subjects + removed_outliers)
        file_names_gm = [
            (os.path.join(
                "OAS1_%04d_MR1",
                "mwrc1OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz")
             % (s, s),
             url_images, opts)
            for s in range(1, 457) if s not in missing_subjects][:n_subjects]
        file_names_wm = [
            (os.path.join(
                "OAS1_%04d_MR1",
                "mwrc2OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz")
             % (s, s),
             url_images, opts)
            for s in range(1, 457) if s not in missing_subjects]
    else:
        # only one gross outlier produced, hidden by nilearn API
        removed_outliers = [390]
        missing_subjects = sorted(missing_subjects + removed_outliers)
        file_names_gm = [
            (os.path.join(
                "OAS1_%04d_MR1",
                "mwc1OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz")
             % (s, s),
             url_images, opts)
            for s in range(1, 457) if s not in missing_subjects][:n_subjects]
        file_names_wm = [
            (os.path.join(
                "OAS1_%04d_MR1",
                "mwc2OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz")
             % (s, s),
             url_images, opts)
            for s in range(1, 457) if s not in missing_subjects]
    file_names_extvars = [("oasis_cross-sectional.csv", url_csv, {})]
    file_names_dua = [("data_usage_agreement.txt", url_dua, {})]
    # restrict to user-specified number of subjects
    # NOTE(review): the gm lists are already truncated to n_subjects in both
    # branches above, so the gm slice here is a harmless no-op; the wm lists
    # are only truncated here.
    file_names_gm = file_names_gm[:n_subjects]
    file_names_wm = file_names_wm[:n_subjects]

    file_names = (file_names_gm + file_names_wm +
                  file_names_extvars + file_names_dua)
    dataset_name = 'oasis1'
    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)
    files = _fetch_files(data_dir, file_names, resume=resume,
                         verbose=verbose)

    # Build Bunch: `files` preserves the order of `file_names`, so the first
    # n_subjects entries are gm maps, the next n_subjects are wm maps, and
    # the last two are the CSV and the data usage agreement.
    gm_maps = files[:n_subjects]
    wm_maps = files[n_subjects:(2 * n_subjects)]
    ext_vars_file = files[-2]
    data_usage_agreement = files[-1]

    # Keep CSV information only for selected subjects
    # NOTE(review): np.recfromcsv is deprecated in recent NumPy releases —
    # consider np.genfromtxt/pandas when this code is next touched.
    csv_data = np.recfromcsv(ext_vars_file)
    # Comparisons to recfromcsv data must be bytes.
    # Recover each subject id ("OAS1_XXXX_MR1") from the gm map path.
    actual_subjects_ids = [("OAS1" +
                            str.split(os.path.basename(x),
                                      "OAS1")[1][:9]).encode()
                           for x in gm_maps]
    subject_mask = np.asarray([subject_id in actual_subjects_ids
                               for subject_id in csv_data['id']])
    csv_data = csv_data[subject_mask]

    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(
        gray_matter_maps=gm_maps,
        white_matter_maps=wm_maps,
        ext_vars=csv_data,
        data_usage_agreement=data_usage_agreement,
        description=fdescr)
def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None):
    """ Download a Freesurfer fsaverage surface

    Parameters
    ----------
    mesh: str, optional (default='fsaverage5')
        Which mesh to fetch.
        'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes)
        'fsaverage5_sphere': the low-resolution fsaverage5 spheres (10242 nodes)
        'fsaverage': the high-resolution fsaverage mesh (163842 nodes)
            (high-resolution fsaverage will result in
            more computation time and memory usage)
    data_dir: str, optional (default=None)
        Path of the data directory. Used to force data storage in a specified
        location.

    Returns
    -------
    data: sklearn.datasets.base.Bunch
        Dictionary-like object, the interest attributes are :
         - 'pial_left': Gifti file, left hemisphere pial surface mesh
         - 'pial_right': Gifti file, right hemisphere pial surface mesh
         - 'infl_left': Gifti file, left hemisphere inflated pial surface mesh
         - 'infl_right': Gifti file, right hemisphere inflated pial
                         surface mesh
         - 'sulc_left': Gifti file, left hemisphere sulcal depth data
         - 'sulc_right': Gifti file, right hemisphere sulcal depth data

    References
    ----------
    Fischl et al, (1999). High-resolution intersubject averaging and a
    coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284.
    """
    # Dispatch on the requested mesh name; each value is a dedicated fetcher.
    fetchers = {'fsaverage5': _fetch_surf_fsaverage5,
                'fsaverage5_sphere': _fetch_surf_fsaverage5_sphere,
                'fsaverage': _fetch_surf_fsaverage}
    fetcher = fetchers.get(mesh)
    if fetcher is None:
        raise ValueError(
            "'mesh' should be one of {}; {!r} was provided".format(
                list(fetchers.keys()), mesh))
    return fetcher(data_dir=data_dir)
def _fetch_surf_fsaverage(data_dir=None):
    """Fetch the high-resolution fsaverage surfaces and sulcal maps.

    The archive is downloaded from nitrc on first use, then cached on disk.
    """
    dataset_dir = _get_dataset_dir('fsaverage', data_dir=data_dir)
    url = 'https://www.nitrc.org/frs/download.php/10846/fsaverage.tar.gz'

    extracted_dir = os.path.join(dataset_dir, 'fsaverage')
    if not os.path.isdir(extracted_dir):
        # Download the tarball, then unpack it in place.
        _fetch_files(dataset_dir, [('fsaverage.tar.gz', url, {})])
        _uncompress_file(os.path.join(dataset_dir, 'fsaverage.tar.gz'))

    surfaces = {}
    for key in ['pial_right', 'sulc_right', 'sulc_left', 'pial_left']:
        surfaces[key] = os.path.join(extracted_dir, '{}.gii'.format(key))
    # The inflated meshes use a different on-disk naming scheme.
    surfaces['infl_left'] = os.path.join(extracted_dir, 'inflated_left.gii')
    surfaces['infl_right'] = os.path.join(extracted_dir, 'inflated_right.gii')
    surfaces['description'] = str(_get_dataset_descr('fsaverage'))
    return Bunch(**surfaces)
def _fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1):
    """Return the fsaverage5 surfaces and sulcal maps shipped with Nilearn.

    The data originally comes from nitrc (PR #1016); the gzipped Gifti files
    are bundled inside the nilearn package itself (issue #1705), so no
    download takes place here.
    """
    dataset_name = 'fsaverage5'
    # Dataset description
    fdescr = _get_dataset_descr(dataset_name)

    def _surface(kind, hemi):
        # Bundled files are named e.g. 'pial.left.gii.gz' under
        # FSAVERAGE5_PATH.
        return os.path.join(FSAVERAGE5_PATH, '%s.%s.gii.gz' % (kind, hemi))

    return Bunch(pial_left=_surface('pial', 'left'),
                 pial_right=_surface('pial', 'right'),
                 infl_left=_surface('pial_inflated', 'left'),
                 infl_right=_surface('pial_inflated', 'right'),
                 sulc_left=_surface('sulc', 'left'),
                 sulc_right=_surface('sulc', 'right'),
                 description=fdescr)
def _fetch_surf_fsaverage5_sphere(data_dir=None):
    """Fetch the fsaverage5 spherical meshes (downloaded from OSF).

    These meshes are useful for visualization and for running cortical
    surface-based searchlight decoding.
    """
    # Spheres live in a subdirectory of the fsaverage dataset directory.
    parent_dir = _get_dataset_dir('fsaverage', data_dir=data_dir)
    sphere_dir = _get_dataset_dir('fsaverage5_sphere', data_dir=parent_dir)
    url = 'https://osf.io/b79fy/download'

    hemispheres = ['sphere_right', 'sphere_left']
    download_spec = [('{}.gii'.format(name), url, {'uncompress': True})
                     for name in hemispheres]
    _fetch_files(sphere_dir, download_spec)

    payload = {name: os.path.join(sphere_dir, '{}.gii'.format(name))
               for name in hemispheres}
    payload['description'] = str(_get_dataset_descr('fsaverage5_sphere'))
    return Bunch(**payload)
| 38.52993 | 85 | 0.64775 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.