hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7957fcd6e551a17a5937d8e9a1e0800613f5e28f | 2,871 | py | Python | fbpcp/gateway/costexplorer.py | corbantek/fbpcp | ed94bde5db40e4713f26dd7e0697c010a5d87e52 | [
"MIT"
] | 24 | 2021-08-24T09:30:18.000Z | 2022-03-28T20:51:01.000Z | fbpcp/gateway/costexplorer.py | corbantek/fbpcp | ed94bde5db40e4713f26dd7e0697c010a5d87e52 | [
"MIT"
] | 142 | 2021-08-16T23:49:27.000Z | 2022-03-31T21:05:04.000Z | fbpcp/gateway/costexplorer.py | corbantek/fbpcp | ed94bde5db40e4713f26dd7e0697c010a5d87e52 | [
"MIT"
] | 23 | 2021-09-10T22:55:04.000Z | 2022-03-25T18:11:25.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import Optional, Dict, Any
import boto3
from fbpcp.decorator.error_handler import error_handler
from fbpcp.entity.cloud_cost import CloudCost
from fbpcp.gateway.aws import AWSGateway
from fbpcp.mapper.aws import map_cecost_to_cloud_cost
COST_GRANULARITY = "DAILY"
class CostExplorerGateway(AWSGateway):
    """Thin gateway around the boto3 Cost Explorer ("ce") client."""

    def __init__(
        self,
        access_key_id: Optional[str] = None,
        access_key_data: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(
            access_key_id=access_key_id, access_key_data=access_key_data, config=config
        )
        # pyre-ignore
        self.client = boto3.client("ce", **self.config)

    @error_handler
    def get_cost(
        self,
        start_date: str,
        end_date: str,
        region: Optional[str] = None,
    ) -> CloudCost:
        """Fetch cost data between start_date and end_date with the
        CostExplorer get_cost_and_usage() API, following all result pages.

        get_cost_and_usage() reference:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ce.html#CostExplorer.Client.get_cost_and_usage

        :param start_date: start date for cost, required format "yyyy-mm-dd" (e.g "2020-12-01")
        :param end_date: end date for cost, required format "yyyy-mm-dd" (e.g "2020-12-01")
        :param region: region name used as an additional filter for cost
        :return: CloudCost holding the total cost plus CloudCostItem entries
            grouped by region and service; cost_amount is in USD
        """
        # Region filter is constant across pages, so build it once up front.
        region_filter = (
            {
                "Filter": {
                    "Dimensions": {
                        "Key": "REGION",
                        "Values": [region],
                    }
                }
            }
            if region
            else {}
        )
        results_by_time = []
        page_token = None
        while True:
            extra_args = dict(region_filter)
            if page_token:
                extra_args["NextPageToken"] = page_token
            client_response = self.client.get_cost_and_usage(
                TimePeriod={"Start": start_date, "End": end_date},
                Granularity=COST_GRANULARITY,
                Metrics=["UnblendedCost"],
                GroupBy=[
                    {"Type": "DIMENSION", "Key": "SERVICE"},
                ],
                **extra_args,
            )
            results_by_time.extend(client_response.get("ResultsByTime"))
            page_token = client_response.get("NextPageToken")
            if not page_token:
                break
        return map_cecost_to_cloud_cost(results_by_time)
| 35.8875 | 160 | 0.591432 |
7957ff61236aa1c98e22ffd57273181deceb02e8 | 3,650 | py | Python | tests/__init__.py | zyndagj/pymethyl | af2b05929c75965c3264ee101a2cf93875fdc164 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | zyndagj/pymethyl | af2b05929c75965c3264ee101a2cf93875fdc164 | [
"BSD-3-Clause"
] | 1 | 2019-05-02T15:40:31.000Z | 2019-05-06T22:29:33.000Z | tests/__init__.py | zyndagj/Meth5py | af2b05929c75965c3264ee101a2cf93875fdc164 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import unittest, sys, os
try:
from StringIO import StringIO
except:
from io import StringIO
# Import path to test the CLI
#try: from unittest.mock import patch
#except: from mock import patch
# buffer for capturing log info
logStream = StringIO()
# Need to start logger BEFORE importing any pyPlateCalibrate code
import logging
#FORMAT = "[%(levelname)s - %(filename)s:%(lineno)s - %(funcName)15s] %(message)s"
FORMAT = '[%(levelname)s - P-%(process)d - %(filename)s:%(lineno)s - %(msecs)d] %(message)s'
# Route all log records into the in-memory buffer so tests can inspect (and wipe) them.
logging.basicConfig(stream=logStream, level=logging.DEBUG, format=FORMAT)
# Now import module
from Meth5py import Meth5py
class TestMeth5py(unittest.TestCase):
    """Exercises Meth5py index creation, FASTA-index parsing, and region
    fetches, for both single-core and 4-core configurations.

    The p1/p4 test bodies were exact duplicates; the shared assertions now
    live in private helpers parameterized by the worker count.
    """

    def setUp(self):
        # Paths to the bundled fixture files next to this test module.
        self.mr = os.path.join(os.path.dirname(__file__), 'test_meth.txt')
        self.h5 = os.path.join(os.path.dirname(__file__), 'test_meth.txt.h5')
        self.fa = os.path.join(os.path.dirname(__file__), 'test.fa')

    def tearDown(self):
        ## Runs after every test function ##
        # Wipe captured log output between tests.
        logStream.truncate(0)
        # Remove the generated h5 index so each test rebuilds it from scratch.
        if os.path.exists(self.h5): os.remove(self.h5)
        ## Runs after every test function ##

    def _open(self, n_cores):
        """Open the fixture methylation file with the given worker count."""
        return Meth5py(self.mr, self.fa, n_cores=n_cores, verbose=True)

    def _check_fai(self, n_cores):
        """Shared assertions for the FASTA index reader."""
        m5 = self._open(n_cores)
        self.assertEqual(m5.sorted_chroms, ['Chr1', 'Chr2'])
        self.assertEqual(m5.chrom_dict, {'Chr1': 20, 'Chr2': 20})
        m5.close()

    def _check_fetch(self, n_cores):
        """Shared assertions for fetch(): covered positions and empty regions."""
        m5 = self._open(n_cores)
        self.assertTrue(all(m5.fetch('Chr1', 10, 10)[0] == [2, 0, 10, 20, 1, 1]))
        self.assertTrue(all(m5.fetch('Chr1', end=1)[0] == [-1] * 6))
        self.assertTrue(all(map(all, m5.fetch('Chr1', 10, 11) == [[2, 0, 10, 20, 1, 1], [2, 0, 12, 20, 1, 1]])))
        # All regions with no reads: each yields `n` rows of [-1]*6.
        for chrom, start, end, n in (
                ('Chr1', 1, 9, 9), ('Chr1', 19, 20, 2),
                ('Chr2', 1, 5, 5), ('Chr2', 9, 9, 1),
                ('Chr2', 11, 11, 1), ('Chr2', 13, 14, 2),
                ('Chr2', 16, 16, 1), ('Chr2', 19, 20, 2)):
            self.assertTrue(all(map(all, m5.fetch(chrom, start, end) == [[-1] * 6] * n)))
        m5.close()

    def test_p4_IndexCreation(self):
        m5 = self._open(4)
        m5.close()

    def test_p4_FaiReader(self):
        self._check_fai(4)

    def test_p4_Fetch(self):
        self._check_fetch(4)

    def test_p1_IndexCreation(self):
        m5 = self._open(1)
        m5.close()

    def test_p1_FaiReader(self):
        self._check_fai(1)

    def test_p1_Fetch(self):
        self._check_fetch(1)
| 45.061728 | 95 | 0.647945 |
7957ff67c64c3841e8a4b535a152ca1a0fc9095c | 1,882 | py | Python | client.py | HAKSOAT/Jumpy_GRPC | d5b6b035433d89e27dfb497308f1c53ed6816bb0 | [
"Apache-2.0"
] | null | null | null | client.py | HAKSOAT/Jumpy_GRPC | d5b6b035433d89e27dfb497308f1c53ed6816bb0 | [
"Apache-2.0"
] | null | null | null | client.py | HAKSOAT/Jumpy_GRPC | d5b6b035433d89e27dfb497308f1c53ed6816bb0 | [
"Apache-2.0"
] | null | null | null | import grpc
from protos_wrapper import jumpy_pb2
from protos_wrapper import jumpy_pb2_grpc
def get_product(stub):
    """Unary RPC: fetch the product at index 4 of the Xiaomi smart-watch
    category, print it, and return the response."""
    request = jumpy_pb2.ProductRequest(
        category=jumpy_pb2.Category(
            link="https://www.jumia.com.ng/smart-watch-bands/xiaomi/"
        ),
        index=4,
    )
    response = stub.GetProduct(request)
    print(response)
    return response
def get_products(stub):
    """Server-streaming RPC: fetch all products in the Xiaomi smart-watch
    category, printing each streamed message as it arrives."""
    request = jumpy_pb2.ProductRequest(
        category=jumpy_pb2.Category(
            link="https://www.jumia.com.ng/smart-watch-bands/xiaomi/"
        ),
    )
    response = stub.GetProducts(request)
    for item in response:
        print(item)
    return response
def generate_stream_requests():
    """Yield one ProductRequest per category link, for client-streaming RPCs."""
    category_links = (
        "https://www.jumia.com.ng/smart-watch-bands/oraimo/",
        "https://www.jumia.com.ng/smart-watch-bands/xiaomi/",
        "https://www.jumia.com.ng/smart-watch-bands/tecno/",
    )
    for category_link in category_links:
        yield jumpy_pb2.ProductRequest(
            category=jumpy_pb2.Category(
                link=category_link
            ),
        )
def get_cheapest_product_per_stream(stub):
    """Client-streaming RPC: send all category requests, then receive a
    single cheapest-product response."""
    cheapest = stub.GetCheapestProduct_Stream(generate_stream_requests())
    print(cheapest)
    return cheapest
def get_cheapest_product_per_message(stub):
    """Bidirectional-streaming RPC: one response per streamed request;
    each is printed with its position in the stream."""
    responses = stub.GetCheapestProduct_Message(generate_stream_requests())
    for index, message in enumerate(responses):
        print(index, message)
    return responses
def run():
    """Open an insecure channel to the local server and exercise the RPCs."""
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = jumpy_pb2_grpc.JumiaStub(channel)
        # Alternative demos; enable one at a time:
        # get_product(stub)
        # get_products(stub)
        # get_cheapest_product_per_stream(stub)
        get_cheapest_product_per_message(stub)


if __name__ == "__main__":
    run()
7957ffa117d3c0992303d008c4eb0fa6a95696aa | 483 | py | Python | venv/Lib/site-packages/sklearn/svm/libsvm.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/svm/libsvm.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/svm/libsvm.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
from . import _libsvm
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
# Old and new import locations used to build the deprecation warning.
deprecated_path = 'sklearn.svm.libsvm'
correct_import_path = 'sklearn.svm'
# Emit a FutureWarning on import of this module (suppressed under pytest).
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
    """PEP 562 module hook: forward attribute lookups to ``_libsvm``."""
    attr = getattr(_libsvm, name)
    return attr
# Module-level __getattr__ is only honored natively on Python >= 3.7;
# Pep562 backports the hook for older interpreters.
if not sys.version_info >= (3, 7):
    Pep562(__name__)
| 26.833333 | 71 | 0.776398 |
7958015afa12b6f92196ca78d69790976a38b1ab | 6,971 | py | Python | tests/test_mail_views.py | dominicrodger/djohno | 1b0891ee661eae4bd313b619fe2cc2b0965763c0 | [
"BSD-2-Clause"
] | 3 | 2015-04-07T13:19:05.000Z | 2016-02-22T08:42:47.000Z | tests/test_mail_views.py | dominicrodger/djohno | 1b0891ee661eae4bd313b619fe2cc2b0965763c0 | [
"BSD-2-Clause"
] | 2 | 2018-01-30T10:41:55.000Z | 2018-03-12T07:15:51.000Z | tests/test_mail_views.py | dominicrodger/djohno | 1b0891ee661eae4bd313b619fe2cc2b0965763c0 | [
"BSD-2-Clause"
] | 2 | 2018-01-30T07:50:06.000Z | 2021-12-01T00:05:04.000Z | from django.core import mail
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import socket
from mock import Mock, patch
from smtplib import SMTPConnectError
from tests.utils import DjohnoBaseViewTests
class DjohnoMailViewTests(DjohnoBaseViewTests):
    """End-to-end tests of djohno's email views: the framed check page,
    successful sends, from-address validation, and transport failures."""

    def test_djohno_email_frame_with_login(self):
        """
        Tests to ensure loading the djohno framed email test view is
        successful, and renders a few specific strings.
        """
        url = reverse('djohno_frame_email')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Djohno: Email Check')
        self.assertContains(response,
                            'src="%s"' % reverse('djohno_email'))

    @override_settings(DEFAULT_FROM_EMAIL='Foobar <foo@bar.com>')
    def test_mail_view_complex_from_address(self):
        """
        Ensure the mail view correctly sends emails, and sends the
        expected text (we have a "pretty" from address).
        """
        url = reverse('djohno_email')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'djohno/email_sent.html')
        self.assertEqual(len(mail.outbox), 1)
        sent = mail.outbox[0]
        self.assertEqual(sent.subject, 'djohno email test')
        self.assertTrue(sent.body.find('Congratulations') != -1)
        # The "pretty address" hint must NOT appear for a named address.
        self.assertEqual(sent.body.find('It\'s probably a good'), -1)
        self.assertEqual(sent.body.find('\n\n\n'), -1)
        self.assertEqual(len(sent.to), 1)
        self.assertEqual(sent.to[0], 'foo@example.com')
        self.assertContains(response, "successfully sent")
        self.assertContains(response, "foo@example.com")
        self.assertContains(response, "Foobar <foo@bar.com>")

    @override_settings(DEFAULT_FROM_EMAIL='Foobar <foo@bar.com>')
    def test_idempotent_mail_view_complex_from_address(self):
        """
        Ensure the idempotent mail view correctly parses emails.
        """
        url = reverse('djohno_email')
        # GET must not send anything -- only POST does.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'djohno/email.html')
        self.assertEqual(len(mail.outbox), 0)
        self.assertContains(response, "foo@example.com")
        self.assertContains(response, "Foobar <foo@bar.com>")

    @override_settings(DEFAULT_FROM_EMAIL='simple@bar.com')
    def test_mail_view_simple_from_address(self):
        """
        Ensure the mail view correctly sends emails, and sends the
        expected text (we don't have a "pretty" from address, so it
        should tell us about that).
        """
        url = reverse('djohno_email')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'djohno/email_sent.html')
        self.assertEqual(len(mail.outbox), 1)
        sent = mail.outbox[0]
        self.assertEqual(sent.subject, 'djohno email test')
        self.assertTrue(sent.body.find('Congratulations') != -1)
        # Bare address: the "pretty address" hint must be present.
        self.assertNotEqual(sent.body.find('It\'s probably a good'), -1)
        self.assertEqual(sent.body.find('\n\n\n'), -1)
        self.assertEqual(len(sent.to), 1)
        self.assertEqual(sent.to[0], 'foo@example.com')
        self.assertContains(response, "successfully sent")
        self.assertContains(response, "foo@example.com")
        self.assertContains(response, "simple@bar.com")

    @override_settings(DEFAULT_FROM_EMAIL='notanemail')
    def test_mail_view_invalid_from_address(self):
        """
        Ensure the mail view correctly detects invalid from emails.
        """
        url = reverse('djohno_email')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'djohno/bad_email.html')
        self.assertTemplateUsed(response,
                                'djohno/_bad_email_invalid.html')
        self.assertEqual(len(mail.outbox), 0)
        self.assertContains(response, "notanemail")

    @override_settings(DEFAULT_FROM_EMAIL='webmaster@localhost')
    def test_mail_view_default_from_address(self):
        """
        Ensure the mail view correctly detects the DEFAULT_FROM_EMAIL
        settings not being overriden.
        """
        url = reverse('djohno_email')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'djohno/bad_email.html')
        self.assertTemplateUsed(response,
                                'djohno/_bad_email_default.html')
        self.assertEqual(len(mail.outbox), 0)
        self.assertContains(response, "Your Name")
        self.assertContains(response, "you@example.com")

    def test_mail_view_smtp_failure(self):
        """
        Ensure the mail view correctly handles SMTP failures.
        """
        # Signature mirrors the send_mail call patched below, so the fake
        # is a drop-in replacement that always fails.
        def fake_send_mail(subject, message,
                           from_email, recipient_list,
                           fail_silently=False,
                           auth_user=None, auth_password=None,
                           connection=None):
            raise SMTPConnectError(1337, "SMTP is too awesome")

        url = reverse('djohno_email')
        with patch('djohno.views.send_mail',
                   Mock(side_effect=fake_send_mail)):
            response = self.client.post(url)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'djohno/email_sent.html')
            self.assertEqual(len(mail.outbox), 0)
            self.assertContains(response, "failed to send")
            self.assertContains(response,
                                "(1337, 'SMTP is too awesome')")

    def test_mail_view_socket_failure(self):
        """
        Ensure the mail view correctly handles socket failures
        (probably fairly unlikely except in local development
        scenarios, when you are without an internet connection).
        """
        def fake_send_mail(subject, message,
                           from_email, recipient_list,
                           fail_silently=False,
                           auth_user=None, auth_password=None,
                           connection=None):
            raise socket.error(1337, 'Sockets are too awesome')

        url = reverse('djohno_email')
        with patch('djohno.views.send_mail',
                   Mock(side_effect=fake_send_mail)):
            response = self.client.post(url)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'djohno/email_sent.html')
            self.assertEqual(len(mail.outbox), 0)
            self.assertContains(response, "failed to send")
            self.assertContains(response,
                                "[Errno 1337] Sockets are too awesome")
7958021358b8497a9740cae1d6248424b99b99af | 1,239 | py | Python | geneticpython/core/operators/mutation/flip_bit_mutation.py | ngocjr7/geneticpython | 4b4157523ce13b3da56cef61282cb0a984cd317b | [
"MIT"
] | null | null | null | geneticpython/core/operators/mutation/flip_bit_mutation.py | ngocjr7/geneticpython | 4b4157523ce13b3da56cef61282cb0a984cd317b | [
"MIT"
] | null | null | null | geneticpython/core/operators/mutation/flip_bit_mutation.py | ngocjr7/geneticpython | 4b4157523ce13b3da56cef61282cb0a984cd317b | [
"MIT"
] | null | null | null | """
# Problem: flip_bit_mutation.py
# Description:
# Created by ngocjr7 on [2020-03-31 16:49:14]
"""
from __future__ import absolute_import
from geneticpython.models.binary_individual import BinaryIndividual
from .mutation import Mutation
from geneticpython.utils.validation import check_random_state
from random import Random
import random
class FlipBitMutation(Mutation):
    """Bit-flip mutation: with probability ``pm`` an individual is mutated,
    and each of its genes is then flipped independently with probability
    ``pe`` (defaulting to ``pm``)."""

    def __init__(self, pm: float, pe: float = None):
        super(FlipBitMutation, self).__init__(pm=pm)
        pe = pm if pe is None else pe
        if not 0.0 < pe <= 1.0:
            raise ValueError('Invalid mutation probability')
        self.pe = pe

    def mutate(self, individual: BinaryIndividual, random_state=None):
        """Return a (possibly mutated) clone of *individual*.

        The RNG is consumed in the same order as before: one draw to decide
        whether to mutate at all, then one draw per gene.
        """
        random_state = check_random_state(random_state)
        should_mutate = random_state.random() <= self.pm
        mutant = individual.clone()
        if should_mutate:
            genes = mutant.chromosome.genes
            for idx in range(len(genes)):
                if random_state.random() <= self.pe:
                    genes[idx] = genes[idx] ^ 1
        return mutant
| 31.769231 | 75 | 0.644875 |
79580223dbae06850bf8303693286aa45535a91d | 4,335 | py | Python | torchrppg/nets/blocks/blocks.py | TVS-AI/fed_rppg | 88886554ace264d40b3e2fefd2ef22f61a1f1edf | [
"MIT"
] | null | null | null | torchrppg/nets/blocks/blocks.py | TVS-AI/fed_rppg | 88886554ace264d40b3e2fefd2ef22f61a1f1edf | [
"MIT"
] | null | null | null | torchrppg/nets/blocks/blocks.py | TVS-AI/fed_rppg | 88886554ace264d40b3e2fefd2ef22f61a1f1edf | [
"MIT"
] | null | null | null | import torch
class ConvBlock2D(torch.nn.Module):
    """2D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(self, in_channel, out_channel, kernel_size, stride, padding):
        super().__init__()
        layers = [
            torch.nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding),
            torch.nn.BatchNorm2d(out_channel),
            torch.nn.ReLU(inplace=True),
        ]
        # Keep the attribute name so existing state-dict keys still match.
        self.conv_block_2d = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv -> batchnorm -> ReLU to *x*."""
        return self.conv_block_2d(x)
class DeConvBlock3D(torch.nn.Module):
    """3D transposed convolution followed by batch normalization and ELU."""

    def __init__(self, in_channel, out_channel, kernel_size, stride, padding):
        super().__init__()
        layers = [
            torch.nn.ConvTranspose3d(in_channel, out_channel, kernel_size, stride, padding),
            torch.nn.BatchNorm3d(out_channel),
            torch.nn.ELU(),
        ]
        # Keep the attribute name so existing state-dict keys still match.
        self.deconv_block_3d = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply transposed conv -> batchnorm -> ELU to *x*."""
        return self.deconv_block_3d(x)
class ConvBlock3D(torch.nn.Module):
    """3D convolution followed by batch normalization and an in-place ReLU."""

    def __init__(self, in_channel, out_channel, kernel_size, stride, padding):
        super().__init__()
        layers = [
            torch.nn.Conv3d(in_channel, out_channel, kernel_size, stride, padding),
            torch.nn.BatchNorm3d(out_channel),
            torch.nn.ReLU(inplace=True),
        ]
        # Keep the attribute name so existing state-dict keys still match.
        self.conv_block_3d = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv -> batchnorm -> ReLU to *x*."""
        return self.conv_block_3d(x)
class EncoderBlock(torch.nn.Module):
    """Two 3x3 stride-1 conv+BN stages followed by 2x2 max pooling,
    halving the spatial resolution."""

    def __init__(self, in_channel, out_channel):
        super().__init__()
        stages = [
            torch.nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(out_channel),
            torch.nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(out_channel),
            torch.nn.MaxPool2d(kernel_size=2),
        ]
        # Keep the attribute name so existing state-dict keys still match.
        self.conv_eb = torch.nn.Sequential(*stages)

    def forward(self, x):
        """Encode *x*; the final pooling layer halves H and W."""
        return self.conv_eb(x)
class DecoderBlock(torch.nn.Module):
    """Upsampling by ``scale_factor`` followed by two 3x3 stride-1
    transposed-conv+BN stages."""

    def __init__(self, in_channel, out_channel, scale_factor):
        super().__init__()
        stages = [
            torch.nn.Upsample(scale_factor=scale_factor),
            torch.nn.ConvTranspose2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=1,
                                     padding=1),
            torch.nn.BatchNorm2d(out_channel),
            torch.nn.ConvTranspose2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1,
                                     padding=1),
            torch.nn.BatchNorm2d(out_channel),
        ]
        # Keep the attribute name so existing state-dict keys still match.
        self.conv_db = torch.nn.Sequential(*stages)

    def forward(self, x):
        """Decode *x*; H and W grow by ``scale_factor`` via upsampling."""
        return self.conv_db(x)
class TSM(torch.nn.Module):
    """Temporal Shift Module.

    The (B, C, H, W) batch is reinterpreted as consecutive clips of
    ``n_frame`` frames (so B must be divisible by ``n_frame``).  The
    channels are split into three chunks of sizes [C//fold_div,
    C//fold_div, remainder]; the first chunk is shifted by one frame in
    one temporal direction, the second in the other, and the third is
    left unshifted.  Vacated frame slots are zero-filled.
    """

    def __init__(self):
        super().__init__()

    def __call__(self, input, n_frame=4, fold_div=3):
        # BUGFIX: previously `n_frame = 4` was re-assigned here
        # unconditionally, silently discarding the caller-supplied value
        # (e.g. TSM_Block passes n_frame=2 by default). The parameter is
        # now honored.
        B, C, H, W = input.shape
        # Reinterpret the flat batch as (clips, n_frame, H, W, C).
        input = input.view(-1, n_frame, H, W, C)
        fold = C // fold_div
        last_fold = C - (fold_div - 1) * fold
        out1, out2, out3 = torch.split(input, [fold, fold, last_fold], -1)

        # Chunk 1: drop the first frame and zero-pad at the end, so each
        # position takes the values of the following frame.
        padding1 = torch.zeros_like(out1)
        padding1 = padding1[:, -1, :, :, :]
        padding1 = torch.unsqueeze(padding1, 1)
        _, out1 = torch.split(out1, [1, n_frame - 1], 1)
        out1 = torch.cat((out1, padding1), 1)

        # Chunk 2: drop the last frame and zero-pad at the front, so each
        # position takes the values of the preceding frame.
        padding2 = torch.zeros_like(out2)
        padding2 = padding2[:, 0, :, :, :]
        padding2 = torch.unsqueeze(padding2, 1)
        out2, _ = torch.split(out2, [n_frame - 1, 1], 1)
        out2 = torch.cat((padding2, out2), 1)

        # Chunk 3 is unshifted; reassemble and restore (B, C, H, W).
        out = torch.cat((out1, out2, out3), -1)
        out = out.view([-1, C, H, W])
        return out
class TSM_Block(torch.nn.Module):
    """Temporal-shift block: applies TSM to the input and then a single
    2D convolution over the shifted channels."""
    def __init__(self, in_channels, out_channels, kernel_size,padding):
        super().__init__()
        self.tsm1 = TSM()
        self.t_conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                       padding=padding)
    def forward(self, input, n_frame=2, fold_div=3):
        # NOTE(review): the default n_frame=2 here differs from TSM's own
        # default of 4, and TSM as written re-assigns n_frame internally so
        # this argument is effectively ignored -- confirm which value is
        # intended before relying on it.
        t = self.tsm1(input, n_frame, fold_div)
        t = self.t_conv1(t)
        return t
| 37.051282 | 116 | 0.600923 |
795803a10f02c649834c1daed7a87804a8426305 | 2,732 | py | Python | basicsr/data/single_image_dataset.py | Cospel/BasicSR | 430dd1989f75b1400d77df97808b2c7896808a43 | [
"Apache-2.0",
"MIT"
] | 3,168 | 2018-04-26T16:11:32.000Z | 2022-03-31T09:37:39.000Z | basicsr/data/single_image_dataset.py | Cospel/BasicSR | 430dd1989f75b1400d77df97808b2c7896808a43 | [
"Apache-2.0",
"MIT"
] | 437 | 2018-06-14T02:05:58.000Z | 2022-03-28T02:37:15.000Z | basicsr/data/single_image_dataset.py | Cospel/BasicSR | 430dd1989f75b1400d77df97808b2c7896808a43 | [
"Apache-2.0",
"MIT"
] | 837 | 2018-06-05T06:52:47.000Z | 2022-03-31T09:58:20.000Z | from os import path as osp
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from basicsr.data.data_util import paths_from_lmdb
from basicsr.utils import FileClient, imfrombytes, img2tensor, scandir
from basicsr.utils.matlab_functions import rgb2ycbcr
from basicsr.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class SingleImageDataset(data.Dataset):
    """Read only lq images in the test phase.
    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc).
    There are two modes:
    1. 'meta_info_file': Use meta information file to generate paths.
    2. 'folder': Scan folders to generate paths.
    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
    """

    def __init__(self, opt):
        super(SingleImageDataset, self).__init__()
        self.opt = opt
        # file client (io backend) -- created lazily on first __getitem__ call
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        # Optional per-channel normalization statistics.
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None
        self.lq_folder = opt['dataroot_lq']

        # Path generation: lmdb keys, meta-info file listing, or folder scan.
        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.lq_folder]
            self.io_backend_opt['client_keys'] = ['lq']
            self.paths = paths_from_lmdb(self.lq_folder)
        elif 'meta_info_file' in self.opt:
            # Each meta-info line: "<relative path> ..."; only the first field is used.
            with open(self.opt['meta_info_file'], 'r') as fin:
                self.paths = [osp.join(self.lq_folder, line.rstrip().split(' ')[0]) for line in fin]
        else:
            self.paths = sorted(list(scandir(self.lq_folder, full_path=True)))

    def __getitem__(self, index):
        if self.file_client is None:
            # Note: pop() mutates io_backend_opt, consuming the 'type' key on first use.
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # load lq image
        lq_path = self.paths[index]
        img_bytes = self.file_client.get(lq_path, 'lq')
        img_lq = imfrombytes(img_bytes, float32=True)

        # color space transform (optional luma-only output)
        if 'color' in self.opt and self.opt['color'] == 'y':
            img_lq = rgb2ycbcr(img_lq, y_only=True)[..., None]

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
        # normalize (in place) only when stats were provided
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
        return {'lq': img_lq, 'lq_path': lq_path}

    def __len__(self):
        return len(self.paths)
| 39.028571 | 100 | 0.646779 |
795803a71b1cfba9a511acb0dfb44e60a702e0a4 | 151 | py | Python | config.py | suger-luck/QQ-boot | 4fabd400ecde1cbdf3961af1b34cdeed7dbecfc4 | [
"MIT"
] | null | null | null | config.py | suger-luck/QQ-boot | 4fabd400ecde1cbdf3961af1b34cdeed7dbecfc4 | [
"MIT"
] | null | null | null | config.py | suger-luck/QQ-boot | 4fabd400ecde1cbdf3961af1b34cdeed7dbecfc4 | [
"MIT"
] | null | null | null | from nonebot.default_config import *
#写可以下达指令的人,如果不填写就是所有人,就是可以在聊天中触发的
# SUPERUSERS ={2031058091}
from nonebot.default_config import *
SUPERUSERS = {} | 25.166667 | 36 | 0.807947 |
7958050261b566d1d16d547c73ae0e2dffb95892 | 2,274 | py | Python | moto/apigatewayv2/exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | moto/apigatewayv2/exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | 1 | 2022-02-19T02:10:45.000Z | 2022-02-19T02:15:52.000Z | moto/apigatewayv2/exceptions.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | from moto.core.exceptions import JsonRESTError
class APIGatewayV2Error(JsonRESTError):
    """Base class for all API Gateway V2 service errors."""
    pass
class ApiNotFound(APIGatewayV2Error):
    """Raised when no API matches the supplied identifier."""

    code = 404

    def __init__(self, api_id):
        msg = f"Invalid API identifier specified {api_id}"
        super().__init__("NotFoundException", msg)
class AuthorizerNotFound(APIGatewayV2Error):
    """Raised when no Authorizer matches the supplied identifier."""

    code = 404

    def __init__(self, authorizer_id):
        msg = f"Invalid Authorizer identifier specified {authorizer_id}"
        super().__init__("NotFoundException", msg)
class ModelNotFound(APIGatewayV2Error):
    """Raised when no Model matches the supplied identifier."""

    code = 404

    def __init__(self, model_id):
        msg = f"Invalid Model identifier specified {model_id}"
        super().__init__("NotFoundException", msg)
class RouteResponseNotFound(APIGatewayV2Error):
    """Raised when no RouteResponse matches the supplied identifier."""

    code = 404

    def __init__(self, rr_id):
        msg = f"Invalid RouteResponse identifier specified {rr_id}"
        super().__init__("NotFoundException", msg)
class BadRequestException(APIGatewayV2Error):
    """Generic 400 error carrying a caller-supplied message."""
    code = 400
    def __init__(self, message):
        super().__init__("BadRequestException", message)
class IntegrationNotFound(APIGatewayV2Error):
    """Raised when no Integration matches the supplied identifier."""

    code = 404

    def __init__(self, integration_id):
        msg = f"Invalid Integration identifier specified {integration_id}"
        super().__init__("NotFoundException", msg)
class IntegrationResponseNotFound(APIGatewayV2Error):
    """Raised when no IntegrationResponse matches the supplied identifier."""

    code = 404

    def __init__(self, int_res_id):
        msg = f"Invalid IntegrationResponse identifier specified {int_res_id}"
        super().__init__("NotFoundException", msg)
class RouteNotFound(APIGatewayV2Error):
    """Raised when no Route matches the supplied identifier."""

    code = 404

    def __init__(self, route_id):
        msg = f"Invalid Route identifier specified {route_id}"
        super().__init__("NotFoundException", msg)
class VpcLinkNotFound(APIGatewayV2Error):
    """Raised when no VpcLink matches the supplied identifier."""

    code = 404

    def __init__(self, vpc_link_id):
        msg = f"Invalid VpcLink identifier specified {vpc_link_id}"
        super().__init__("NotFoundException", msg)
class UnknownProtocol(APIGatewayV2Error):
    """Raised when a protocol other than HTTP or WEBSOCKET is supplied."""

    # NOTE(review): unlike the sibling errors, no explicit `code` is set
    # here, so the base-class default applies -- confirm that is intended.
    def __init__(self):
        msg = "Invalid protocol specified. Must be one of [HTTP, WEBSOCKET]"
        super().__init__("BadRequestException", msg)
| 23.6875 | 86 | 0.654793 |
7958051382e2c6913f6e19c25cece17f78575227 | 1,861 | py | Python | shape.py | ricardovobarros/randominn | c556e972092b4688e82709345953930a3ae09d6b | [
"MIT"
] | null | null | null | shape.py | ricardovobarros/randominn | c556e972092b4688e82709345953930a3ae09d6b | [
"MIT"
] | null | null | null | shape.py | ricardovobarros/randominn | c556e972092b4688e82709345953930a3ae09d6b | [
"MIT"
] | null | null | null | """ Module designated to Shape class
Author: Renan Vasconcelos
"""
from config import *
class Shape:
    def __init__(self, shape_address="", driver="ESRI Shapefile"):
        """Class that represents a shape file

        Attributes
        ----
        address: String of the files' address
        name: String of the files' name
        driver: Object Driver from Gdal
        dataset: Object Dataset from Gdal
        layer: Object Layer from Gdal

        Methods:
        ----
        shape2dataframe: Creates a geo data frame and
        adds two new columns (xy coordinates) to it

        Parameters:
        ____
        :param shape_address: String local address of shape file
        :param driver: Type of driver to open the shape file
        """
        try:
            self.address = shape_address
            # BUGFIX: previously used str.strip(".shp"), which removes the
            # *character set* {'.', 's', 'h', 'p'} from both ends (e.g.
            # "ships.shp" -> "i") instead of stripping the extension.
            self.name = self._stem_from_address(self.address)
            self.driver = ogr.GetDriverByName(driver)
            self.dataset = self.driver.Open(shape_address)
            self.layer = self.dataset.GetLayer()
            logging.info("Shape: {} was read successfully".format(self.name))
        except Exception:
            logging.error("Shape file could not be read")

    @staticmethod
    def _stem_from_address(shape_address):
        """Return the file name of *shape_address* without its .shp suffix.

        :param shape_address: local path of the shape file
        :return: base file name with a trailing ".shp" removed, if present
        """
        base_name = shape_address.split(str(Path("/")))[-1]
        if base_name.endswith(".shp"):
            base_name = base_name[:-len(".shp")]
        return base_name

    @property
    def shape2dataframe(self):
        """Creates a Geo data frame from the instantiated object
        and add two new columns with the xy coordinates respectively.
        This xy tuples are extracted from a columns of the Geopandas
        named ["geometry"].

        :return: Data frame with x y new columns
        """
        try:
            df_shape = geopandas.read_file(self.address)
            df_shape["x"] = df_shape["geometry"].x
            df_shape["y"] = df_shape["geometry"].y
        except Exception:
            # Best-effort reporting kept from the original: downstream code
            # cannot proceed without the frame, so the process exits.
            print("Geopandas could not be created")
            sys.exit()
        return df_shape
| 30.016129 | 77 | 0.601827 |
795805dbd87959553ecf6a7bb28eef9a843e1ec0 | 666 | py | Python | hello-docker/dev-container/test.py | BrianThomasRoss/Docker-Devels | 37b89c9ee62f0dfdfd1303ede3e20cfaa1190895 | [
"Apache-2.0"
] | null | null | null | hello-docker/dev-container/test.py | BrianThomasRoss/Docker-Devels | 37b89c9ee62f0dfdfd1303ede3e20cfaa1190895 | [
"Apache-2.0"
] | null | null | null | hello-docker/dev-container/test.py | BrianThomasRoss/Docker-Devels | 37b89c9ee62f0dfdfd1303ede3e20cfaa1190895 | [
"Apache-2.0"
] | null | null | null | print(
"""
Welcome to the world of dev-containers you are now working from inside
a virtualized instance of a linux ubuntu pre-loaded with miniconda zsh,
and preconfigured conda environment.
Some commands to try in the terminal:
which conda - path to miniconda
conda info --envs - list environments
whoami - username
ls -a - directory listing
uname -a - show kernel information
and other sh bash and zsh commands.
This container always produces the same environment on my computer as yours
the same dependencies.
Have fun
"""
) | 33.3 | 79 | 0.624625 |
795807cd49ff52a45d260c2bcc5fa5b3b5463191 | 14,612 | py | Python | SCons/Tool/mslink.py | hwmaier/scons | f29df5f2339874d49bf1aece6e639fa320b445d9 | [
"MIT"
] | null | null | null | SCons/Tool/mslink.py | hwmaier/scons | f29df5f2339874d49bf1aece6e639fa320b445d9 | [
"MIT"
] | null | null | null | SCons/Tool/mslink.py | hwmaier/scons | f29df5f2339874d49bf1aece6e639fa320b445d9 | [
"MIT"
] | null | null | null | """SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from .MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
    """Return linker flags enabling PDB output, or None when no PDB is attached.

    Reads the ``pdb`` attribute stashed on the first target node by the
    emitters; absence of a target or of the attribute means no debug info.
    """
    try:
        pdb_path = target[0].attributes.pdb
    except (AttributeError, IndexError):
        return None
    return ['/PDB:%s' % pdb_path, '/DEBUG']
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
    """Get linker target flags for shared libraries (SHLIB* parameters)."""
    return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
    """Get linker source list for shared libraries (SHLIB* parameters)."""
    return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
    """Get targets for loadable modules (LDMODULE* parameters)."""
    return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
    """Get sources for loadable modules (LDMODULE* parameters)."""
    return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
    """Common implementation of dll emitter.

    Augments *target* and *source* with the extra nodes a Windows DLL link
    produces/consumes: an optional .def source, a .manifest, a PDB, a PCH
    object, an import library and an export (.exp) file.  *paramtp* is either
    'SHLIB' or 'LDMODULE' and selects the construction-variable family.
    Returns the extended (target, source) pair.
    """
    SCons.Tool.msvc.validate_vars(env)
    extratargets = []
    extrasources = []
    dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
    no_import_lib = env.get('no_import_lib', 0)
    if not dll:
        raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
    insert_def = env.subst("$WINDOWS_INSERT_DEF")
    # WINDOWS_INSERT_DEF may arrive as '', '0' or 0 when disabled.
    if insert_def not in ['', '0', 0] and \
       not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
        # append a def file to the list of sources
        extrasources.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and \
       (env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
        # MSVC 8 and above automatically generate .manifest files that must be installed
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # Stash the PDB node so pdbGenerator can emit /PDB: later.
        target[0].attributes.pdb = pdb
    if version_num >= 11.0 and env.get('PCH', 0):
        # MSVC 11 and above need the PCH object file to be added to the link line,
        # otherwise you get link error LNK2011.
        pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
        # print "prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj)
        if pchobj not in extrasources:
            extrasources.append(pchobj)
    if not no_import_lib and \
       not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
        # Append an import library to the list of targets.
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "LIBPREFIX", "LIBSUFFIX"))
        # and .exp file is created if there are exports from a DLL
        extratargets.append(
            env.ReplaceIxes(dll,
                            '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
                            "WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
    return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
    """Emitter for shared libraries: delegates to _dllEmitter with SHLIB params."""
    return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
    """Emitter for loadable modules.
    Loadable modules are identical to shared libraries on Windows, but building
    them is subject to different parameters (LDMODULE*).
    """
    return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
    """Emitter for executables: add manifest, PDB and PCH-object side nodes.

    Mirrors _dllEmitter but for programs (PROG* construction variables);
    returns the extended (target, source) pair.
    """
    SCons.Tool.msvc.validate_vars(env)
    extratargets = []
    extrasources = []
    exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
    if not exe:
        raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
    if version_num >= 8.0 and \
       (env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
        # MSVC 8 and above automatically generate .manifest files that have to be installed
        extratargets.append(
            env.ReplaceIxes(exe,
                            "PROGPREFIX", "PROGSUFFIX",
                            "WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
    if 'PDB' in env and env['PDB']:
        pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
        extratargets.append(pdb)
        # Stash the PDB node so pdbGenerator can emit /PDB: later.
        target[0].attributes.pdb = pdb
    if version_num >= 11.0 and env.get('PCH', 0):
        # MSVC 11 and above need the PCH object file to be added to the link line,
        # otherwise you get link error LNK2011.
        pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
        # print("prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj))
        if pchobj not in extrasources:
            extrasources.append(pchobj)
    return (target+extratargets,source+extrasources)
def RegServerFunc(target, source, env):
    """Register the built DLL with regsvr32 when env['register'] is set.

    Returns the regsvr32 exit status (0 when registration succeeds or is not
    requested) and raises UserError when registration fails.
    """
    if 'register' in env and env['register']:
        ret = regServerAction([target[0]], [source[0]], env)
        if ret:
            raise SCons.Errors.UserError("Unable to register %s" % target[0])
        else:
            # Fix: the original message misspelled "successfully".
            print("Registered %s successfully" % target[0])
        return ret
    return 0
# These are the actual actions run to embed the manifest.
# They are only called from the Check versions below.
embedManifestExeAction = SCons.Action.Action('$MTEXECOM')    # mt.exe invocation for executables
embedManifestDllAction = SCons.Action.Action('$MTSHLIBCOM')  # mt.exe invocation for shared libraries
def embedManifestDllCheck(target, source, env):
    """Embed target[0]'s .manifest into the DLL when WINDOWS_EMBED_MANIFEST is set.

    Quietly does nothing when embedding is disabled or the manifest file is
    absent; raises UserError if the mt.exe action fails.
    """
    if not env.get('WINDOWS_EMBED_MANIFEST', 0):
        return 0
    manifestSrc = target[0].get_abspath() + '.manifest'
    if not os.path.exists(manifestSrc):
        print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
        return 0
    ret = embedManifestDllAction([target[0]], None, env)
    if ret:
        raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
    return ret
def embedManifestExeCheck(target, source, env):
    """Embed target[0]'s .manifest into the executable when WINDOWS_EMBED_MANIFEST is set.

    Quietly does nothing when embedding is disabled or the manifest file is
    absent; raises UserError if the mt.exe action fails.
    """
    if not env.get('WINDOWS_EMBED_MANIFEST', 0):
        return 0
    manifestSrc = target[0].get_abspath() + '.manifest'
    if not os.path.exists(manifestSrc):
        print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
        return 0
    ret = embedManifestExeAction([target[0]], None, env)
    if ret:
        raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
    return ret
# Guard actions: only run the real mt.exe embed actions when conditions hold.
embedManifestDllCheckAction = SCons.Action.Action(embedManifestDllCheck, None)
embedManifestExeCheckAction = SCons.Action.Action(embedManifestExeCheck, None)
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
# Composite actions: link, then optionally register the DLL and embed its manifest.
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES", "$SHLINKCOMSTR")}', '$SHLINKCOMSTR')
compositeShLinkAction = shlibLinkAction + regServerCheck + embedManifestDllCheckAction
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES", "$LDMODULECOMSTR")}', '$LDMODULECOMSTR')
compositeLdmodAction = ldmodLinkAction + regServerCheck + embedManifestDllCheckAction
exeLinkAction = SCons.Action.Action('${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows", "$LINKCOMSTR")}', '$LINKCOMSTR')
compositeLinkAction = exeLinkAction + embedManifestExeCheckAction
def generate(env):
    """Add Builders and construction variables for the Microsoft linker to an Environment."""
    SCons.Tool.createSharedLibBuilder(env, shlib_suffix='$SHLIBSUFFIX')
    SCons.Tool.createProgBuilder(env)
    env['SHLINK'] = '$LINK'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
    env['_SHLINK_TARGETS'] = windowsShlinkTargets
    env['_SHLINK_SOURCES'] = windowsShlinkSources
    env['SHLINKCOM'] = compositeShLinkAction
    env.Append(SHLIBEMITTER = [windowsLibEmitter])
    env.Append(LDMODULEEMITTER = [windowsLibEmitter])
    env['LINK'] = 'link'
    env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
    env['_PDB'] = pdbGenerator
    env['LINKCOM'] = compositeLinkAction
    env.Append(PROGEMITTER = [prog_emitter])
    env['LIBDIRPREFIX']='/LIBPATH:'
    env['LIBDIRSUFFIX']=''
    env['LIBLINKPREFIX']=''
    env['LIBLINKSUFFIX']='$LIBSUFFIX'
    # WIN32* variables are deprecated aliases kept for backward compatibility;
    # the WINDOWS* variables below reference them.
    env['WIN32DEFPREFIX']        = ''
    env['WIN32DEFSUFFIX']        = '.def'
    env['WIN32_INSERT_DEF']      = 0
    env['WINDOWSDEFPREFIX']      = '${WIN32DEFPREFIX}'
    env['WINDOWSDEFSUFFIX']      = '${WIN32DEFSUFFIX}'
    env['WINDOWS_INSERT_DEF']    = '${WIN32_INSERT_DEF}'
    env['WIN32EXPPREFIX']        = ''
    env['WIN32EXPSUFFIX']        = '.exp'
    env['WINDOWSEXPPREFIX']      = '${WIN32EXPPREFIX}'
    env['WINDOWSEXPSUFFIX']      = '${WIN32EXPSUFFIX}'
    env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
    env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
    env['WINDOWSPROGMANIFESTPREFIX'] = ''
    env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
    env['REGSVRACTION'] = regServerCheck
    env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
    env['REGSVRFLAGS'] = '/s '
    env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
    env['WINDOWS_EMBED_MANIFEST'] = 0
    env['MT'] = 'mt'
    #env['MTFLAGS'] = ['-hashupdate']
    env['MTFLAGS'] = SCons.Util.CLVar('/nologo')
    # Note: use - here to prevent build failure if no manifest produced.
    # This seems much simpler than a fancy system using a function action to see
    # if the manifest actually exists before trying to run mt with it.
    env['MTEXECOM']   = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;1'
    env['MTSHLIBCOM'] = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;2'
    # TODO Future work garyo 27-Feb-11
    env['_MANIFEST_SOURCES'] = None  # _windowsManifestSources
    # Set-up ms tools paths
    msvc_setup_env_once(env)
    # Loadable modules are on Windows the same as shared libraries, but they
    # are subject to different build parameters (LDMODULE* variables).
    # Therefore LDMODULE* variables correspond as much as possible to
    # SHLINK*/SHLIB* ones.
    SCons.Tool.createLoadableModuleBuilder(env, loadable_module_suffix='$LDMODULESUFFIX')
    env['LDMODULE'] = '$SHLINK'
    env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
    env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
    env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
    env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
    env['_LDMODULE_SOURCES'] = _windowsLdmodSources
    env['LDMODULEEMITTER'] = [ldmodEmitter]
    env['LDMODULECOM'] = compositeLdmodAction
    # Issue #3350
    # Change tempfile argument joining character from a space to a newline
    # mslink will fail if any single line is too long, but is fine with many lines
    # in a tempfile
    env['TEMPFILEARGJOIN'] = os.linesep
def exists(env):
    """Return a truthy value when an MSVC toolchain is available for *env*."""
    return msvc_exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 42.976471 | 187 | 0.678962 |
795808ba10868f4f72cda8c6d9855cec318b59e3 | 308 | py | Python | languages/python/src/concepts/P116_Yapf_CodeAfterFormatting.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2019-05-25T10:09:00.000Z | 2022-03-11T09:06:23.000Z | languages/python/src/concepts/P116_Yapf_CodeAfterFormatting.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2020-03-31T04:30:17.000Z | 2020-10-30T07:54:28.000Z | languages/python/src/concepts/P116_Yapf_CodeAfterFormatting.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 4 | 2019-07-12T13:18:56.000Z | 2021-11-17T08:04:55.000Z | # Description: Code AFTER Formatting Using yapf
# Sample assignments demonstrating yapf's normalized spacing and quoting.
x = {'a': 37, 'b': 42, 'c': 927}
y = 'hello ' 'world'
z = 'hello ' + 'world'
a = 'hello {}'.format('world')
class foo(object):
    """Demo class; method bodies exist only to exercise yapf formatting."""
    def f(self):
        return 37 * -2
    def g(self, x, y=42):
        return y
def f(a):
    # NOTE(review): formatting-demo code only — module-level `x` is a dict
    # here, so `42 - x` would raise TypeError if this were actually called.
    return 37 - a[42 - x:y ** 3]
| 15.4 | 47 | 0.512987 |
79580947ebd30cbf99a582c5430663eaed1b1d9a | 440 | pyde | Python | Processing Py_!/listing_27/listing_27.pyde | GermogenovAs/2019-fall-polytech-cs | 3d02ed149b5d0468e213d96b5a1aa8263880a3e1 | [
"MIT"
] | null | null | null | Processing Py_!/listing_27/listing_27.pyde | GermogenovAs/2019-fall-polytech-cs | 3d02ed149b5d0468e213d96b5a1aa8263880a3e1 | [
"MIT"
] | null | null | null | Processing Py_!/listing_27/listing_27.pyde | GermogenovAs/2019-fall-polytech-cs | 3d02ed149b5d0468e213d96b5a1aa8263880a3e1 | [
"MIT"
] | null | null | null | def setup():
size(500, 500)
smooth()
# Phase accumulator shared across draw() frames.
counter = 0
def draw():
    # Processing frame callback: a dot traces a sine wave left to right.
    global counter
    noStroke()
    fill(10, 50)
    rect(-1, -1, width+1, height+1)  # translucent overlay -> fading trail effect
    ny = sin(counter)*100+200  # vertical position follows a sine wave
    nx = counter*10            # horizontal position advances each frame
    stroke(250)
    strokeWeight(20)
    line(nx, ny, nx, ny)  # zero-length thick line renders a round dot
    counter = counter + 0.1
    if(nx > width):
        counter = 0  # wrap back to the left edge
def keyPressed():
    # Press 's' to save the current frame as a PNG.
    if key=='s':
        saveFrame("myProcessing.png")
| 13.75 | 35 | 0.515909 |
795809f0ad9bc72ee3d5f8014977536b37bb48f7 | 16,057 | py | Python | bindings/gumjs/generate-runtime.py | stevielavern/frida-gum | f6e1f5cdb1e0876aae81e111a1b286ba2224c3ca | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-10-10T16:46:09.000Z | 2020-10-10T16:46:09.000Z | bindings/gumjs/generate-runtime.py | ohjeongwook/frida-gum | 3d2e446b2530b705db0a5fd3fa7b91aa30f319bb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | bindings/gumjs/generate-runtime.py | ohjeongwook/frida-gum | 3d2e446b2530b705db0a5fd3fa7b91aa30f319bb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from __future__ import unicode_literals, print_function
from base64 import b64decode
import codecs
import glob
import json
import os
import platform
import re
import subprocess
import sys
def generate_runtime_v8(runtime_name, output_dir, output, inputs):
    """Bundle compiled JS files into a C header of GumV8RuntimeModule entries.

    Each input becomes a NUL-terminated source-code byte array plus an
    optional inline source map, terminated by a NULL sentinel entry.
    """
    with codecs.open(os.path.join(output_dir, output), 'wb', 'utf-8') as output_file:
        output_file.write("#include \"gumv8bundle.h\"\n")
        modules = []
        for input_path in inputs:
            input_name = os.path.basename(input_path)
            base, ext = os.path.splitext(input_name)
            input_source_code_identifier = "gumjs_{0}_source_code".format(identifier(base))
            input_source_map_identifier = "gumjs_{0}_source_map".format(identifier(base))
            with codecs.open(input_path, 'rb', 'utf-8') as input_file:
                source_code = input_file.read()
            (stripped_source_code, source_map) = extract_source_map(input_name, source_code)
            # Append a NUL so the C side sees a terminated string.
            source_code_bytes = bytearray(stripped_source_code.encode('utf-8'))
            source_code_bytes.append(0)
            source_code_size = len(source_code_bytes)
            output_file.write("\nstatic const gchar {0}[{1}] =\n{{".format(input_source_code_identifier, source_code_size))
            write_bytes(source_code_bytes, output_file)
            output_file.write("\n};\n")
            if source_map is not None:
                source_map_bytes = bytearray(source_map.encode('utf-8'))
                source_map_bytes.append(0)
                source_map_size = len(source_map_bytes)
                output_file.write("\nstatic const gchar {0}[{1}] =\n{{".format(input_source_map_identifier, source_map_size))
                write_bytes(source_map_bytes, output_file)
                output_file.write("\n};\n")
                modules.append((input_name, input_source_code_identifier, input_source_map_identifier))
            else:
                modules.append((input_name, input_source_code_identifier, "NULL"))
        output_file.write("\nstatic const GumV8RuntimeModule gumjs_{0}_modules[] =\n{{".format(runtime_name))
        for filename, source_code_identifier, source_map_identifier in modules:
            output_file.write("\n  {{ \"{0}\", {1}, {2} }},".format(filename, source_code_identifier, source_map_identifier))
        output_file.write("\n  { NULL, NULL, NULL }\n};")
def generate_runtime_duk(runtime_name, output_dir, output, input_dir, inputs):
    """Compile JS inputs to Duktape bytecode and bundle them into a C header.

    Builds the gumdukcompile helper on first use (cl.exe on Windows, clang on
    macOS, gcc elsewhere), then emits one bytecode array plus optional source
    map per input, terminated by a NULL sentinel entry.
    """
    with codecs.open(os.path.join(output_dir, output), 'wb', 'utf-8') as output_file:
        output_file.write("#include \"gumdukbundle.h\"\n")
        build_os = platform.system().lower()
        if build_os == 'windows':
            program_suffix = ".exe"
        else:
            program_suffix = ""
        dukcompile = os.path.join(output_dir, "gumdukcompile" + program_suffix)
        if not os.path.exists(dukcompile):
            # Build the bytecode compiler from the bundled duktape sources.
            dukcompile_sources = list(map(lambda name: os.path.join(input_dir, name), ["gumdukcompile.c", "duktape.c"]))
            if build_os == 'windows':
                subprocess.check_call(["cl.exe",
                    "/nologo", "/MT", "/W3", "/O1", "/GL", "/MP",
                    "/D", "WIN32",
                    "/D", "_WINDOWS",
                    "/D", "WINVER=0x0501",
                    "/D", "_WIN32_WINNT=0x0501",
                    "/D", "NDEBUG",
                    "/D", "_CRT_SECURE_NO_WARNINGS",
                    "/D", "_USING_V110_SDK71_",
                    "/D", "GUM_DUK_NO_COMPAT"] + dukcompile_sources, cwd=output_dir)
            else:
                dukcompile_libs = []
                if build_os == 'darwin':
                    sdk = "macosx"
                    CC = [
                        subprocess.check_output(["xcrun", "--sdk", sdk, "-f", "clang"]).decode('utf-8').rstrip("\n"),
                        "-isysroot", subprocess.check_output(["xcrun", "--sdk", sdk, "--show-sdk-path"]).decode('utf-8').rstrip("\n")
                    ]
                else:
                    CC = ["gcc"]
                    dukcompile_libs.append("-lm")
                subprocess.check_call(CC + ["-Wall", "-pipe", "-O1", "-fomit-frame-pointer", "-DGUM_DUK_NO_COMPAT"] +
                    dukcompile_sources +
                    ["-o", dukcompile] + dukcompile_libs)
        modules = []
        for input_path in inputs:
            input_name = os.path.basename(input_path)
            base, ext = os.path.splitext(input_name)
            input_name_duk = base + ".duk"
            input_path_duk = os.path.join(output_dir, input_name_duk)
            input_bytecode_identifier = "gumjs_{0}_bytecode".format(identifier(base))
            input_source_map_identifier = "gumjs_{0}_source_map".format(identifier(base))
            subprocess.check_call([dukcompile, input_path, input_path_duk])
            with open(input_path_duk, 'rb') as duk:
                bytecode = duk.read()
                bytecode_size = len(bytecode)
                output_file.write("\nstatic const guint8 {0}[{1}] =\n{{".format(input_bytecode_identifier, bytecode_size))
                write_bytes(bytecode, output_file)
                output_file.write("\n};\n")
                with codecs.open(input_path, 'rb', 'utf-8') as input_file:
                    source_code = input_file.read()
                (stripped_source_code, source_map) = extract_source_map(input_name, source_code)
                if source_map is not None:
                    source_map_bytes = bytearray(source_map.encode('utf-8'))
                    source_map_bytes.append(0)
                    source_map_size = len(source_map_bytes)
                    output_file.write("\nstatic const gchar {0}[{1}] =\n{{".format(input_source_map_identifier, source_map_size))
                    write_bytes(source_map_bytes, output_file)
                    output_file.write("\n};\n")
                    modules.append((input_bytecode_identifier, bytecode_size, input_source_map_identifier))
                else:
                    modules.append((input_bytecode_identifier, bytecode_size, "NULL"))
        output_file.write("\nstatic const GumDukRuntimeModule gumjs_{0}_modules[] =\n{{".format(runtime_name))
        for bytecode_identifier, bytecode_size, source_map_identifier in modules:
            output_file.write("\n  {{ {0}, {1}, {2} }},".format(bytecode_identifier, bytecode_size, source_map_identifier))
        output_file.write("\n  { NULL, 0, NULL }\n};")
# Matches C function declarations at line start; group(1) is the return type,
# group(2) the function name.  Used to harvest exported symbol names.
cmodule_function_pattern = re.compile(
    r"^(void|size_t|int|unsigned int|bool|const char \*|gpointer|gsize|gssize|gint[0-9]*|guint[0-9]*|gfloat|gdouble|gboolean|(?:const )?\w+ \*|cs_err) ([a-z][a-z0-9_]+)\s?\(",
    re.MULTILINE)
# Matches `extern <type> name;` declarations; group(2) is the variable name.
cmodule_variable_pattern = re.compile(r"^(extern .+? )(\w+);", re.MULTILINE)
# Matches `#include "<name>.h"` lines in capstone headers.
capstone_include_pattern = re.compile(r'^#include "(\w+)\.h"$', re.MULTILINE)
capstone_export_pattern = re.compile(r"^CAPSTONE_EXPORT$", re.MULTILINE)
# C block comments (non-greedy) and C++ line comments, for strip_header().
c_comment_pattern = re.compile(r"\/\*(\*(?!\/)|[^*])*\*\/")
cpp_comment_pattern = re.compile(r"\s+?\/\/.+")
def generate_runtime_cmodule(output_dir, output, arch, input_dir, gum_dir, capstone_dir):
    """Bundle the CModule header set (runtime, tinycc, Gum writer, Capstone)
    for *arch* into a C header, plus a lazily-built symbol table accessor.
    """
    # Map the build arch onto the Gum writer/Capstone arch naming.
    writer_arch = "x86" if arch.startswith("x86") or arch == "x64" else arch
    writer_name = "thumb" if writer_arch == "arm" else writer_arch
    capstone_arch = writer_arch
    def gum_header_matches_writer(name):
        # Only bundle the writer header for the selected architecture.
        return name == "gum" + writer_name + "writer.h"
    def optimize_gum_header(source):
        # Drop export decoration; CModule compiles everything in-process.
        return source.replace("GUM_API ", "")
    def capstone_header_matches_arch(name):
        if name in ("capstone.h", "platform.h"):
            return True
        return name == capstone_arch + ".h"
    def optimize_capstone_header(source):
        result = capstone_include_pattern.sub(transform_capstone_include, source)
        result = capstone_export_pattern.sub("", result)
        result = result.replace("CAPSTONE_API ", "")
        return result
    def transform_capstone_include(m):
        # Keep includes for the active arch; stub out the others so
        # capstone.h still parses without pulling in every architecture.
        name = m.group(1)
        if name in ("platform", capstone_arch):
            return m.group(0)
        if name == "systemz":
            name = "sysz"
        return "typedef int cs_{0};".format(name)
    inputs = [
        (os.path.join(input_dir, "runtime", "cmodule"), None, is_header, identity_transform),
        (os.path.join(input_dir, "..", "..", "ext", "tinycc", "include"), None, is_header, identity_transform),
        (os.path.join(gum_dir, "arch-" + writer_arch), os.path.dirname(gum_dir), gum_header_matches_writer, optimize_gum_header),
        (os.path.dirname(capstone_dir), None, capstone_header_matches_arch, optimize_capstone_header),
    ]
    with codecs.open(os.path.join(output_dir, output), 'wb', 'utf-8') as output_file:
        modules = []
        symbols = []
        for header_dir, header_reldir, header_filter, header_transform in inputs:
            for header_name, header_source in find_headers(header_dir, header_reldir, header_filter, header_transform):
                input_identifier = "gum_cmodule_{0}".format(identifier(header_name))
                # Collect declared function and variable names for the symbol table.
                for pattern in (cmodule_function_pattern, cmodule_variable_pattern):
                    for m in pattern.finditer(header_source):
                        name = m.group(2)
                        symbols.append(name)
                source_bytes = bytearray(header_source.encode('utf-8'))
                source_bytes.append(0)
                source_size = len(source_bytes)
                output_file.write("static const gchar {0}[{1}] =\n{{".format(input_identifier, source_size))
                write_bytes(source_bytes, output_file)
                output_file.write("\n};\n\n")
                modules.append((header_name, input_identifier, source_size - 1))
        output_file.write("static const GumCModuleHeader gum_cmodule_headers[] =\n{")
        for input_name, input_identifier, input_size in modules:
            output_file.write("\n  {{ \"{0}\", {1}, {2} }},".format(input_name, input_identifier, input_size))
        output_file.write("\n};\n")
        symbol_insertions = ["    g_hash_table_insert (symbols, \"{0}\", GUM_FUNCPTR_TO_POINTER ({0}));".format(name) for name in symbols]
        output_file.write("""
static void gum_cmodule_deinit_symbols (void);
static GHashTable *
gum_cmodule_get_symbols (void)
{{
  static volatile gsize gonce_value;
  if (g_once_init_enter (&gonce_value))
  {{
    GHashTable * symbols;
    symbols = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, NULL);
{insertions}
    _gum_register_destructor (gum_cmodule_deinit_symbols);
    g_once_init_leave (&gonce_value, GPOINTER_TO_SIZE (symbols) + 1);
  }}
  return GSIZE_TO_POINTER (gonce_value - 1);
}}
static void
gum_cmodule_deinit_symbols (void)
{{
  g_hash_table_unref (gum_cmodule_get_symbols ());
}}
""".format(insertions="\n".join(symbol_insertions)))
def find_headers(include_dir, relative_to_dir, is_header, transform):
    """Yield (relative_name, processed_source) for headers under *include_dir*.

    *is_header* filters file names; *transform* rewrites each header's text.
    Names are made relative to *relative_to_dir* (or *include_dir* when None)
    and use forward slashes.
    """
    base_dir = include_dir if relative_to_dir is None else relative_to_dir
    for root, dirs, files in os.walk(include_dir):
        for filename in files:
            if not is_header(filename):
                continue
            path = os.path.join(root, filename)
            rel_name = os.path.relpath(path, base_dir).replace("\\", "/")
            with codecs.open(path, 'rb', 'utf-8') as f:
                source = strip_header(transform(strip_header(f.read())))
            yield (rel_name, source)
def is_header(name):
    """Return True when *name* looks like a C header file (".h" suffix)."""
    suffix = ".h"
    return name.endswith(suffix)
def identity_transform(v):
    """No-op header transform used when a header needs no rewriting."""
    return v
def strip_header(source):
    """Strip C block comments and C++ line comments, then collapse blank lines."""
    result = cpp_comment_pattern.sub("", c_comment_pattern.sub("", source))
    while "\n\n" in result:
        result = result.replace("\n\n", "\n")
    return result
# Matches an inline `sourceMappingURL=data:...;base64,` comment; group(1) is
# the base64-encoded source map payload.
source_map_pattern = re.compile("//[#@][ \t]sourceMappingURL=[ \t]*data:application/json;.*?base64,([^\\s'\"]*)[ \t]*\n")
def extract_source_map(filename, source_code):
    """Split an inline base64 source map out of *source_code*.

    Returns (code_without_map_comment, raw_source_map_json) or
    (source_code, None) when no map comment is present.  The map's "file"
    and "sources" fields are rewritten to canonical frida paths.
    """
    match = source_map_pattern.search(source_code)
    if match is None:
        return (source_code, None)
    source_map = json.loads(b64decode(match.group(1)).decode('utf-8'))
    source_map['file'] = filename
    source_map['sources'] = [to_canonical_source_path(s) for s in source_map['sources']]
    stripped_source_code = source_map_pattern.sub("", source_code)
    return (stripped_source_code, json.dumps(source_map))
def to_canonical_source_path(path):
    """Prefix *path* with "frida/" so source maps resolve under a common root."""
    return "frida/{0}".format(path)
def write_bytes(data, sink):
    """Write *data* to *sink* as comma-separated decimal byte values.

    Output starts on a fresh indented line and wraps to a new indented line
    once roughly 70 characters have been emitted.
    """
    sink.write("\n  ")
    line_length = 0
    for position, value in enumerate(bytearray(data)):
        if position > 0:
            sink.write(",")
            line_length += 1
        if line_length >= 70:
            sink.write("\n  ")
            line_length = 0
        token = str(value)
        sink.write(token)
        line_length += len(token)
def identifier(filename):
    """Derive a C-safe identifier from *filename*.

    A leading "frida-" is dropped; alphanumerics are lowercased and every
    other character becomes an underscore.
    """
    name = filename
    if name.startswith("frida-"):
        name = name[len("frida-"):]
    return "".join(c.lower() if c.isalnum() else "_" for c in name)
def node_script_path(name):
    """Absolute path to the npm bin wrapper *name* under node_modules/.bin."""
    return os.path.abspath(os.path.join(sys.path[0], "node_modules", ".bin", name + script_suffix()))
def script_suffix():
    """Return ".cmd" on Windows (npm bin wrappers), the empty string elsewhere."""
    return ".cmd" if platform.system().lower() == 'windows' else ""
if __name__ == '__main__':
    # CLI: arch input_dir gum_dir capstone_dir output_dir
    arch = sys.argv[1]
    input_dir = sys.argv[2]
    gum_dir = sys.argv[3]
    capstone_dir = sys.argv[4]
    output_dir = sys.argv[5]
    # --- V8 runtime bundles ---
    v8_tmp_dir = os.path.join(output_dir, "runtime-build-v8")
    runtime = os.path.abspath(os.path.join(v8_tmp_dir, "frida.js"))
    objc = os.path.abspath(os.path.join(v8_tmp_dir, "objc.js"))
    java = os.path.abspath(os.path.join(v8_tmp_dir, "java.js"))
    v8_options = [
        "-x", # No need for Babel, V8 supports modern JS.
        "-c", # Compress for smaller code and better performance.
    ]
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/entrypoint-v8.js", "-o", runtime] + v8_options, cwd=input_dir)
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/objc.js", "-o", objc] + v8_options, cwd=input_dir)
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/java.js", "-o", java] + v8_options, cwd=input_dir)
    generate_runtime_v8("runtime", output_dir, "gumv8script-runtime.h", [runtime])
    generate_runtime_v8("objc", output_dir, "gumv8script-objc.h", [objc])
    generate_runtime_v8("java", output_dir, "gumv8script-java.h", [java])
    # --- Duktape runtime bundles ---
    duk_tmp_dir = os.path.join(output_dir, "runtime-build-duk")
    runtime = os.path.abspath(os.path.join(duk_tmp_dir, "frida.js"))
    promise = os.path.abspath(os.path.join(duk_tmp_dir, "promise.js"))
    objc = os.path.abspath(os.path.join(duk_tmp_dir, "objc.js"))
    java = os.path.abspath(os.path.join(duk_tmp_dir, "java.js"))
    duk_options = [
        "-L", # Tell Babel to sacrifice spec compliance for reduced bloat and better performance.
        "-c", # Compress for smaller code and better performance.
    ]
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/entrypoint-duktape.js", "-o", runtime] + duk_options, cwd=input_dir)
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/promise.js", "-o", promise, "-x"] + duk_options, cwd=input_dir)
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/objc.js", "-o", objc] + duk_options, cwd=input_dir)
    subprocess.check_call([node_script_path("frida-compile"), "./runtime/java.js", "-o", java] + duk_options, cwd=input_dir)
    generate_runtime_duk("runtime", output_dir, "gumdukscript-runtime.h", input_dir, [runtime])
    generate_runtime_duk("promise", output_dir, "gumdukscript-promise.h", input_dir, [promise])
    generate_runtime_duk("objc", output_dir, "gumdukscript-objc.h", input_dir, [objc])
    generate_runtime_duk("java", output_dir, "gumdukscript-java.h", input_dir, [java])
    # --- CModule header bundle ---
    generate_runtime_cmodule(output_dir, "gumcmodule-runtime.h", arch, input_dir, gum_dir, capstone_dir)
| 40.54798 | 179 | 0.631251 |
79580aabd91ddf598fdc93a950077d6320de4411 | 22,900 | py | Python | InvenTree/common/models.py | mosenturm/InvenTree | d4529ec1c47287dc7b12e8ef38a97c11897b6da7 | [
"MIT"
] | null | null | null | InvenTree/common/models.py | mosenturm/InvenTree | d4529ec1c47287dc7b12e8ef38a97c11897b6da7 | [
"MIT"
] | null | null | null | InvenTree/common/models.py | mosenturm/InvenTree | d4529ec1c47287dc7b12e8ef38a97c11897b6da7 | [
"MIT"
] | null | null | null | """
Common database model definitions.
These models are 'generic' and do not fit a particular business logic object.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.db import models, transaction
from django.db.utils import IntegrityError, OperationalError
from django.conf import settings
import djmoney.settings
from djmoney.models.fields import MoneyField
from djmoney.contrib.exchange.models import convert_money
from djmoney.contrib.exchange.exceptions import MissingRate
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator, URLValidator
from django.core.exceptions import ValidationError
import InvenTree.helpers
import InvenTree.fields
class InvenTreeSetting(models.Model):
"""
An InvenTreeSetting object is a key:value pair used for storing
single values (e.g. one-off settings values).
The class provides a way of retrieving the value for a particular key,
even if that key does not exist.
"""
"""
Dict of all global settings values:
The key of each item is the name of the value as it appears in the database.
Each global setting has the following parameters:
- name: Translatable string name of the setting (required)
- description: Translatable string description of the setting (required)
- default: Default value (optional)
- units: Units of the particular setting (optional)
- validator: Validation function for the setting (optional)
The keys must be upper-case
"""
GLOBAL_SETTINGS = {
'INVENTREE_INSTANCE': {
'name': _('InvenTree Instance Name'),
'default': 'InvenTree server',
'description': _('String descriptor for the server instance'),
},
'INVENTREE_COMPANY_NAME': {
'name': _('Company name'),
'description': _('Internal company name'),
'default': 'My company name',
},
'INVENTREE_BASE_URL': {
'name': _('Base URL'),
'description': _('Base URL for server instance'),
'validator': URLValidator(),
'default': '',
},
'INVENTREE_DEFAULT_CURRENCY': {
'name': _('Default Currency'),
'description': _('Default currency'),
'default': 'USD',
'choices': djmoney.settings.CURRENCY_CHOICES,
},
'INVENTREE_DOWNLOAD_FROM_URL': {
'name': _('Download from URL'),
'description': _('Allow download of remote images and files from external URL'),
'validator': bool,
'default': False,
},
'BARCODE_ENABLE': {
'name': _('Barcode Support'),
'description': _('Enable barcode scanner support'),
'default': True,
'validator': bool,
},
'PART_IPN_REGEX': {
'name': _('IPN Regex'),
'description': _('Regular expression pattern for matching Part IPN')
},
'PART_ALLOW_DUPLICATE_IPN': {
'name': _('Allow Duplicate IPN'),
'description': _('Allow multiple parts to share the same IPN'),
'default': True,
'validator': bool,
},
'PART_ALLOW_EDIT_IPN': {
'name': _('Allow Editing IPN'),
'description': _('Allow changing the IPN value while editing a part'),
'default': True,
'validator': bool,
},
'PART_COPY_BOM': {
'name': _('Copy Part BOM Data'),
'description': _('Copy BOM data by default when duplicating a part'),
'default': True,
'validator': bool,
},
'PART_COPY_PARAMETERS': {
'name': _('Copy Part Parameter Data'),
'description': _('Copy parameter data by default when duplicating a part'),
'default': True,
'validator': bool,
},
'PART_COPY_TESTS': {
'name': _('Copy Part Test Data'),
'description': _('Copy test data by default when duplicating a part'),
'default': True,
'validator': bool
},
'PART_CATEGORY_PARAMETERS': {
'name': _('Copy Category Parameter Templates'),
'description': _('Copy category parameter templates when creating a part'),
'default': True,
'validator': bool
},
'PART_RECENT_COUNT': {
'name': _('Recent Part Count'),
'description': _('Number of recent parts to display on index page'),
'default': 10,
'validator': [int, MinValueValidator(1)]
},
'PART_TEMPLATE': {
'name': _('Template'),
'description': _('Parts are templates by default'),
'default': False,
'validator': bool,
},
'PART_ASSEMBLY': {
'name': _('Assembly'),
'description': _('Parts can be assembled from other components by default'),
'default': False,
'validator': bool,
},
'PART_COMPONENT': {
'name': _('Component'),
'description': _('Parts can be used as sub-components by default'),
'default': True,
'validator': bool,
},
'PART_PURCHASEABLE': {
'name': _('Purchaseable'),
'description': _('Parts are purchaseable by default'),
'default': False,
'validator': bool,
},
'PART_SALABLE': {
'name': _('Salable'),
'description': _('Parts are salable by default'),
'default': False,
'validator': bool,
},
'PART_TRACKABLE': {
'name': _('Trackable'),
'description': _('Parts are trackable by default'),
'default': False,
'validator': bool,
},
'PART_VIRTUAL': {
'name': _('Virtual'),
'description': _('Parts are virtual by default'),
'default': False,
'validator': bool,
},
'PART_SHOW_QUANTITY_IN_FORMS': {
'name': _('Show Quantity in Forms'),
'description': _('Display available part quantity in some forms'),
'default': True,
'validator': bool,
},
'REPORT_DEBUG_MODE': {
'name': _('Debug Mode'),
'description': _('Generate reports in debug mode (HTML output)'),
'default': False,
'validator': bool,
},
'REPORT_DEFAULT_PAGE_SIZE': {
'name': _('Page Size'),
'description': _('Default page size for PDF reports'),
'default': 'A4',
'choices': [
('A4', 'A4'),
('Legal', 'Legal'),
('Letter', 'Letter')
],
},
'REPORT_ENABLE_TEST_REPORT': {
'name': _('Test Reports'),
'description': _('Enable generation of test reports'),
'default': True,
'validator': bool,
},
'STOCK_ENABLE_EXPIRY': {
'name': _('Stock Expiry'),
'description': _('Enable stock expiry functionality'),
'default': False,
'validator': bool,
},
'STOCK_ALLOW_EXPIRED_SALE': {
'name': _('Sell Expired Stock'),
'description': _('Allow sale of expired stock'),
'default': False,
'validator': bool,
},
'STOCK_STALE_DAYS': {
'name': _('Stock Stale Time'),
'description': _('Number of days stock items are considered stale before expiring'),
'default': 0,
'units': _('days'),
'validator': [int],
},
'STOCK_ALLOW_EXPIRED_BUILD': {
'name': _('Build Expired Stock'),
'description': _('Allow building with expired stock'),
'default': False,
'validator': bool,
},
'STOCK_OWNERSHIP_CONTROL': {
'name': _('Stock Ownership Control'),
'description': _('Enable ownership control over stock locations and items'),
'default': False,
'validator': bool,
},
'STOCK_GROUP_BY_PART': {
'name': _('Group by Part'),
'description': _('Group stock items by part reference in table views'),
'default': True,
'validator': bool,
},
'STOCK_RECENT_COUNT': {
'name': _('Recent Stock Count'),
'description': _('Number of recent stock items to display on index page'),
'default': 10,
'validator': [int, MinValueValidator(1)]
},
'BUILDORDER_REFERENCE_PREFIX': {
'name': _('Build Order Reference Prefix'),
'description': _('Prefix value for build order reference'),
'default': 'BO',
},
'BUILDORDER_REFERENCE_REGEX': {
'name': _('Build Order Reference Regex'),
'description': _('Regular expression pattern for matching build order reference')
},
'SALESORDER_REFERENCE_PREFIX': {
'name': _('Sales Order Reference Prefix'),
'description': _('Prefix value for sales order reference'),
'default': 'SO',
},
'PURCHASEORDER_REFERENCE_PREFIX': {
'name': _('Purchase Order Reference Prefix'),
'description': _('Prefix value for purchase order reference'),
'default': 'PO',
},
}
class Meta:
verbose_name = "InvenTree Setting"
verbose_name_plural = "InvenTree Settings"
@classmethod
def get_setting_name(cls, key):
"""
Return the name of a particular setting.
If it does not exist, return an empty string.
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
return setting.get('name', '')
else:
return ''
@classmethod
def get_setting_description(cls, key):
"""
Return the description for a particular setting.
If it does not exist, return an empty string.
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
return setting.get('description', '')
else:
return ''
@classmethod
def get_setting_units(cls, key):
"""
Return the units for a particular setting.
If it does not exist, return an empty string.
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
return setting.get('units', '')
else:
return ''
@classmethod
def get_setting_validator(cls, key):
"""
Return the validator for a particular setting.
If it does not exist, return None
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
return setting.get('validator', None)
else:
return None
@classmethod
def get_setting_default(cls, key):
"""
Return the default value for a particular setting.
If it does not exist, return an empty string
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
return setting.get('default', '')
else:
return ''
@classmethod
def get_setting_choices(cls, key):
"""
Return the validator choices available for a particular setting.
"""
key = str(key).strip().upper()
if key in cls.GLOBAL_SETTINGS:
setting = cls.GLOBAL_SETTINGS[key]
choices = setting.get('choices', None)
else:
choices = None
"""
TODO:
if type(choices) is function:
# Evaluate the function (we expect it will return a list of tuples...)
return choices()
"""
return choices
    @classmethod
    def get_setting_object(cls, key):
        """
        Return an InvenTreeSetting object matching the given key.
        - Key is case-insensitive
        - Returns None if no match is made

        NOTE(review): despite the docstring, a missing setting is created
        on the fly with its declared default value, so in practice None is
        only returned when the database itself is unavailable.
        """
        key = str(key).strip().upper()
        try:
            setting = InvenTreeSetting.objects.filter(key__iexact=key).first()
        except (ValueError, InvenTreeSetting.DoesNotExist):
            setting = None
        except (IntegrityError, OperationalError):
            # Database not ready (e.g. during initial migration)
            setting = None
        # Setting does not exist! (Try to create it)
        if not setting:
            setting = InvenTreeSetting(key=key, value=InvenTreeSetting.get_setting_default(key))
            try:
                # Wrap this statement in "atomic", so it can be rolled back if it fails
                with transaction.atomic():
                    setting.save()
            except (IntegrityError, OperationalError):
                # It might be the case that the database isn't created yet
                pass
        return setting
@classmethod
def get_setting_pk(cls, key):
"""
Return the primary-key value for a given setting.
If the setting does not exist, return None
"""
setting = InvenTreeSetting.get_setting_object(cls)
if setting:
return setting.pk
else:
return None
    @classmethod
    def get_setting(cls, key, backup_value=None):
        """
        Get the value of a particular setting.
        If it does not exist, return the backup value (default = None).

        Boolean settings are coerced via str2bool, integer settings via
        int(); a failed integer coercion falls back to the backup value.
        """
        # If no backup value is specified, attempt to retrieve a "default" value
        if backup_value is None:
            backup_value = cls.get_setting_default(key)
        setting = InvenTreeSetting.get_setting_object(key)
        if setting:
            value = setting.value
            # If the particular setting is defined as a boolean, cast the value to a boolean
            if setting.is_bool():
                value = InvenTree.helpers.str2bool(value)
            if setting.is_int():
                try:
                    value = int(value)
                except (ValueError, TypeError):
                    value = backup_value
        else:
            value = backup_value
        return value
    @classmethod
    def set_setting(cls, key, value, user, create=True):
        """
        Set the value of a particular setting.
        If it does not exist, option to create it.
        Args:
            key: settings key
            value: New value
            user: User object (must be staff member to update a core setting)
            create: If True, create a new setting if the specified key does not exist.
        """
        # Silently refuse the update for non-staff users
        if user is not None and not user.is_staff:
            return
        try:
            setting = InvenTreeSetting.objects.get(key__iexact=key)
        except InvenTreeSetting.DoesNotExist:
            if create:
                setting = InvenTreeSetting(key=key)
            else:
                return
        # Enforce standard boolean representation
        if setting.is_bool():
            value = InvenTree.helpers.str2bool(value)
        # Values are always persisted as strings
        setting.value = str(value)
        setting.save()
    # Case-insensitively unique settings key (uniqueness enforced in validate_unique).
    # NOTE(review): the help_text is missing a closing parenthesis - string
    # change deliberately not made here.
    key = models.CharField(max_length=50, blank=False, unique=True, help_text=_('Settings key (must be unique - case insensitive'))
    # Raw string value; coercion to bool/int happens in clean()/get_setting().
    value = models.CharField(max_length=200, blank=True, unique=False, help_text=_('Settings value'))
    @property
    def name(self):
        """Human-readable name of this setting (from GLOBAL_SETTINGS)."""
        return InvenTreeSetting.get_setting_name(self.key)
    @property
    def default_value(self):
        """Declared default value of this setting (from GLOBAL_SETTINGS)."""
        return InvenTreeSetting.get_setting_default(self.key)
    @property
    def description(self):
        """Description of this setting (from GLOBAL_SETTINGS)."""
        return InvenTreeSetting.get_setting_description(self.key)
    @property
    def units(self):
        """Units string of this setting (from GLOBAL_SETTINGS)."""
        return InvenTreeSetting.get_setting_units(self.key)
    def clean(self):
        """
        If a validator (or multiple validators) are defined for a particular setting key,
        run them against the 'value' field.

        Boolean values are normalized in place; integer values are coerced.

        Raises:
            ValidationError: if an integer setting cannot be coerced, or a
                configured validator rejects the value.
        """
        super().clean()
        validator = InvenTreeSetting.get_setting_validator(self.key)
        if self.is_bool():
            self.value = InvenTree.helpers.str2bool(self.value)
        if self.is_int():
            try:
                self.value = int(self.value)
            except (ValueError):
                raise ValidationError(_('Must be an integer value'))
        if validator is not None:
            self.run_validator(validator)
    def run_validator(self, validator):
        """
        Run a validator against the 'value' field for this InvenTreeSetting object.

        Recurses when a list/tuple of validators is supplied.

        Raises:
            ValidationError: if the value fails boolean/integer coercion
                or a callable validator raises.
        """
        if validator is None:
            return
        value = self.value
        # Boolean validator
        if self.is_bool():
            # Value must "look like" a boolean value
            if InvenTree.helpers.is_bool(value):
                # Coerce into either "True" or "False"
                value = InvenTree.helpers.str2bool(value)
            else:
                raise ValidationError({
                    'value': _('Value must be a boolean value')
                })
        # Integer validator
        if self.is_int():
            try:
                # Coerce into an integer value
                value = int(value)
            except (ValueError, TypeError):
                raise ValidationError({
                    'value': _('Value must be an integer value'),
                })
        # If a list of validators is supplied, iterate through each one
        if type(validator) in [list, tuple]:
            for v in validator:
                self.run_validator(v)
        if callable(validator):
            # We can accept function validators with a single argument.
            # NOTE(review): the callable receives the raw self.value, not the
            # coerced local 'value' computed above - confirm this is intended.
            validator(self.value)
    def validate_unique(self, exclude=None):
        """ Ensure that the key:value pair is unique.
        In addition to the base validators, this ensures that the 'key'
        is unique, using a case-insensitive comparison.

        Raises:
            ValidationError: if another setting row already uses this key.
        """
        super().validate_unique(exclude)
        try:
            # Exclude this row itself so saving an existing setting passes
            setting = InvenTreeSetting.objects.exclude(id=self.id).filter(key__iexact=self.key)
            if setting.exists():
                raise ValidationError({'key': _('Key string must be unique')})
        except InvenTreeSetting.DoesNotExist:
            # NOTE(review): filter()/exists() do not raise DoesNotExist, so
            # this handler looks unreachable - confirm before removing.
            pass
    def choices(self):
        """
        Return the available choices for this setting (or None if no choices are defined).
        """
        return InvenTreeSetting.get_setting_choices(self.key)
def is_bool(self):
"""
Check if this setting is required to be a boolean value
"""
validator = InvenTreeSetting.get_setting_validator(self.key)
if validator == bool:
return True
if type(validator) in [list, tuple]:
for v in validator:
if v == bool:
return True
    def as_bool(self):
        """
        Return the value of this setting converted to a boolean value.
        Warning: Only use on values where is_bool evaluates to true!
        """
        return InvenTree.helpers.str2bool(self.value)
def is_int(self):
"""
Check if the setting is required to be an integer value:
"""
validator = InvenTreeSetting.get_setting_validator(self.key)
if validator == int:
return True
if type(validator) in [list, tuple]:
for v in validator:
if v == int:
return True
return False
def as_int(self):
"""
Return the value of this setting converted to a boolean value.
If an error occurs, return the default value
"""
try:
value = int(self.value)
except (ValueError, TypeError):
value = self.default_value()
return value
class PriceBreak(models.Model):
    """
    Abstract base model for a quantity price break:
    a unit price that applies at (or above) a given quantity.
    """
    class Meta:
        # Abstract: concrete price-break models inherit these fields
        abstract = True
    # Minimum quantity at which this price applies (>= 1)
    quantity = InvenTree.fields.RoundingDecimalField(
        max_digits=15,
        decimal_places=5,
        default=1,
        validators=[MinValueValidator(1)],
        verbose_name=_('Quantity'),
        help_text=_('Price break quantity'),
    )
    # Unit price at the specified quantity (MoneyField stores amount + currency)
    price = MoneyField(
        max_digits=19,
        decimal_places=4,
        default_currency='USD',
        null=True,
        verbose_name=_('Price'),
        help_text=_('Unit price at specified quantity'),
    )
    def convert_to(self, currency_code):
        """
        Convert the unit-price at this price break to the specified currency code.
        Args:
            currency_code - The currency code to convert to (e.g "USD" or "AUD")

        Returns the converted amount; if no exchange rate is available,
        falls back to the unconverted amount (and prints a warning).
        """
        try:
            converted = convert_money(self.price, currency_code)
        except MissingRate:
            # NOTE(review): consider logging instead of print
            print(f"WARNING: No currency conversion rate available for {self.price_currency} -> {currency_code}")
            return self.price.amount
        return converted.amount
class ColorTheme(models.Model):
    """ Per-user color theme setting, backed by CSS files on disk. """
    # Sentinel choice representing the built-in default theme
    default_color_theme = ('', _('Default'))
    # Selected theme name (matches a CSS file name, lower-cased)
    name = models.CharField(max_length=20,
                            default='',
                            blank=True)
    # One theme row per user (stored as username string)
    user = models.CharField(max_length=150,
                            unique=True)
    @classmethod
    def get_color_themes_choices(cls):
        """ Get all color themes from static folder """
        # Get files list from css/color-themes/ folder
        files_list = []
        for file in os.listdir(settings.STATIC_COLOR_THEMES_DIR):
            files_list.append(os.path.splitext(file))
        # Get color themes choices (CSS sheets)
        choices = [(file_name.lower(), _(file_name.replace('-', ' ').title()))
                   for file_name, file_ext in files_list
                   if file_ext == '.css' and file_name.lower() != 'default']
        # Add default option as empty option
        choices.insert(0, cls.default_color_theme)
        return choices
    @classmethod
    def is_valid_choice(cls, user_color_theme):
        """ Check whether the given theme object names an available theme. """
        try:
            user_color_theme_name = user_color_theme.name
        except AttributeError:
            # Not a theme-like object at all
            return False
        for color_theme in cls.get_color_themes_choices():
            if user_color_theme_name == color_theme[0]:
                return True
        return False
| 29.74026 | 131 | 0.557336 |
79580affa16a6f4d47e5ad5fe63883835a0bce05 | 333 | py | Python | Leetcode/Easy/Single_Number.py | drkndl/Coding-Practice | 3527e3dadcb593729517b750402812d4a64bca14 | [
"MIT"
] | null | null | null | Leetcode/Easy/Single_Number.py | drkndl/Coding-Practice | 3527e3dadcb593729517b750402812d4a64bca14 | [
"MIT"
] | null | null | null | Leetcode/Easy/Single_Number.py | drkndl/Coding-Practice | 3527e3dadcb593729517b750402812d4a64bca14 | [
"MIT"
] | null | null | null | # Runtime: 3584 ms, faster than 8.17% of Python3 online submissions for Single Number.
# Memory Usage: 16.7 MB, less than 22.54% of Python3 online submissions for Single Number.
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """
        Return the element that appears exactly once, given that every
        other element appears exactly twice.

        XOR of all elements cancels the pairs (a ^ a == 0, a ^ 0 == a),
        leaving the unique value. O(n) time / O(1) space, versus the
        original count()-per-element approach which was O(n^2).
        """
        result = 0
        for num in nums:
            result ^= num
        return result
79580b6a0be720c21b24d1fd9fc22b695158c955 | 875 | bzl | Python | xls/build/py_proto_library.bzl | ted-xie/xls | ef48ade3403fffc6481ffd779e49aa7082ee268f | [
"Apache-2.0"
] | null | null | null | xls/build/py_proto_library.bzl | ted-xie/xls | ef48ade3403fffc6481ffd779e49aa7082ee268f | [
"Apache-2.0"
] | null | null | null | xls/build/py_proto_library.bzl | ted-xie/xls | ef48ade3403fffc6481ffd779e49aa7082ee268f | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter between open source and Google-internal py_proto_library rules."""
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
def xls_py_proto_library(name, internal_deps, srcs, deps = []):
    """Generates a py_proto_library target (open-source adapter).

    Adapter between the open-source py_proto_library rule and the
    Google-internal rule of the same name.

    Args:
      name: Name of the resulting py_proto_library target.
      internal_deps: Ignored; accepted only for signature compatibility
        with the Google-internal rule.
      srcs: Proto source files.
      deps: Optional proto library dependencies.
    """
    py_proto_library(
        name = name,
        srcs = srcs,
        deps = deps,
    )
79580c07920e606edf684a66dcd275897f31e372 | 543 | py | Python | departments/migrations/0001_initial.py | 1Mans/test_task | f6f0fef4748cdfa677f32e494e39c03b22043306 | [
"MIT"
] | null | null | null | departments/migrations/0001_initial.py | 1Mans/test_task | f6f0fef4748cdfa677f32e494e39c03b22043306 | [
"MIT"
] | null | null | null | departments/migrations/0001_initial.py | 1Mans/test_task | f6f0fef4748cdfa677f32e494e39c03b22043306 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2021-10-30 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Department table.
    # Do not hand-edit generated migrations; create a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Department',
            fields=[
                # Implicit auto-increment primary key
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
            ],
        ),
    ]
| 23.608696 | 114 | 0.569061 |
79580ce240749747e75a8776842750269e733ddf | 2,883 | py | Python | src/RWR/seed_calculation.py | agneet42/ImmunAL | ac4dc0d3b6763deab7610616a7e3061161166eb2 | [
"MIT"
] | null | null | null | src/RWR/seed_calculation.py | agneet42/ImmunAL | ac4dc0d3b6763deab7610616a7e3061161166eb2 | [
"MIT"
] | null | null | null | src/RWR/seed_calculation.py | agneet42/ImmunAL | ac4dc0d3b6763deab7610616a7e3061161166eb2 | [
"MIT"
] | null | null | null | #%%
import csv
from scipy.stats.stats import pearsonr
file = csv.reader(open("diseased_final.csv","r"))
all_arr = []
temp_all_arr = []
for lines in file:
if(len(lines) > 0):
temp_all_arr.append(lines)
print(len(temp_all_arr))
for k in range(0,2333):
temp = []
for all in temp_all_arr:
temp.append(all[k])
all_arr.append(temp)
for x in range(0,2333):
all_arr[x] = [float(j) for j in all_arr[x]]
arr_fin = []
for i in range(0,2333):
for j in range(i+1,2333):
temp = []
temp.append(i+1)
temp.append(j+1)
val,unimp = pearsonr(all_arr[i],all_arr[j])
temp.append(val)
arr_fin.append(temp)
print(i)
new_file = csv.writer(open("diseased_pcc_final.csv","w"))
for i in arr_fin:
new_file.writerow(i)
print("Part 1 done")
#%%
# Part 2: build a correlation network from (node, node, PCC) rows, keep only
# edges whose correlation lies inside a band derived from mean +/- stdev
# (halved), then rank nodes by eigenvector centrality and print the top 40
# as candidate seed nodes.
import csv
import networkx as nx
import operator
import statistics
import numpy as np
G = nx.Graph()
# The same CSV is opened three times because csv.reader is a one-pass iterator
file = csv.reader(open("controlled_diseased_final.csv",'r'))
file1 = csv.reader(open("controlled_diseased_final.csv",'r'))
file2 = csv.reader(open("controlled_diseased_final.csv",'r'))
nodes = []
nodes_temp = []
count = 0
# Pass 1: collect all node identifiers
for lines in file:
    if (len(lines)>0):
        nodes_temp.append(lines[0])
        nodes_temp.append(lines[1])
        count = count + 1
nodes = list(set(nodes_temp))
# print(len(nodes))
# G.add_nodes_from(nodes)
# Pass 2: collect the correlation values (column 3)
corr_array = []
for lines in file2:
    if(len(lines)>0):
        corr_array.append(float(lines[2]))
# print(len(corr_array))
mean1 = np.mean(corr_array)
stddev1 = statistics.stdev(corr_array)
# Keep edges with (mean-stdev)/2 < PCC < (mean+stdev)/2
max_range = mean1 + stddev1
max_range = max_range / 2 # variable, to be changed during testing
min_range = mean1 - stddev1
min_range = min_range / 2 # variable, to be changed during testing
edges = []
nodes_count = []
# Pass 3: keep only edges whose correlation falls inside the band
for lines in file1:
    if (len(lines)>0):
        if((float(lines[2]) > min_range) and (float(lines[2]) < max_range)):
            nodes_count.append(lines[0])
            nodes_count.append(lines[1])
            edges_temp = []
            # edges_temp = [lines[0],lines[1]]
            edges_temp = [lines[0],lines[1],float(lines[2])]
            edges.append(edges_temp)
# print(len(edges))
# Persist the filtered edge list for later inspection
with open("layer1_unweighted_v2_final.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerows(edges)
print("done1")
nodes = []
nodes = list(set(nodes_count))
print(len(nodes))
print(len(edges))
G.add_nodes_from(nodes)
G.add_weighted_edges_from(edges,weight='weight')
print("ready for calculation")
# dict1 = nx.closeness_centrality(G,distance='weight')
# dict1 = nx.degree_centrality(G)
dict1 = nx.eigenvector_centrality(G,weight='weight')
# Rank nodes by centrality (descending) and print the top 40 seeds
sorted_dict1 = sorted(dict1.items(),key = operator.itemgetter(1),reverse = True)
sorted_dict1 = sorted_dict1[:40] # variable, to be changed during testing
for x in sorted_dict1:
    print(x[0])
# file3 = csv.writer(open('result_controlled.csv','w'))
print("Part 2 done") | 23.25 | 80 | 0.665973 |
79580d59828719a9131bf90ebd83de6fbe83678d | 17,227 | py | Python | preview_generator/manager.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | preview_generator/manager.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | preview_generator/manager.py | asweeney86/preview-generator | 354cbac1c131ebbb81cd9cfd9b4bc0c184d10103 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import hashlib
import logging
import os
import typing
from filelock import FileLock
from preview_generator.extension import mimetypes_storage
from preview_generator.preview.builder.document_generic import DocumentPreviewBuilder
from preview_generator.preview.builder_factory import PreviewBuilderFactory
from preview_generator.utils import ImgDims
from preview_generator.utils import LOCKFILE_EXTENSION
from preview_generator.utils import LOCK_DEFAULT_TIMEOUT
from preview_generator.utils import LOGGER_NAME
class PreviewContext(object):
    """
    Per-file preview context bundling everything needed to build a preview:
    detected mimetype, the matching builder, the cache hash of the file
    path, and a file lock guarding concurrent preview generation.
    """
    def __init__(
        self,
        preview_builder_factory: PreviewBuilderFactory,
        cache_path: str,
        file_path: str,
        file_ext: str,
    ):
        self.mimetype = preview_builder_factory.get_file_mimetype(file_path, file_ext)
        self.builder = preview_builder_factory.get_preview_builder(self.mimetype)
        # Hash of the file *path* (not its content) - used as the cache key
        self.hash = hashlib.md5(file_path.encode("utf-8")).hexdigest()
        file_lock_path = os.path.join(cache_path, self.hash + LOCKFILE_EXTENSION)
        self.filelock = FileLock(file_lock_path, timeout=LOCK_DEFAULT_TIMEOUT)
class PreviewManager(object):
    def __init__(self, cache_folder_path: str, create_folder: bool = False) -> None:
        """
        :param cache_folder_path: path to the cache folder.
        This is where previews will be stored
        :param create_folder: if True, then create the cache folder
        if it does not exist
        """
        self.logger = logging.getLogger(LOGGER_NAME)
        cache_folder_path = os.path.join(cache_folder_path, "") # add trailing slash
        # nopep8 see https://stackoverflow.com/questions/2736144/python-add-trailing-slash-to-directory-string-os-independently
        self.cache_path = cache_folder_path # type: str
        self._factory = (
            PreviewBuilderFactory.get_instance()
        ) # nopep8 keep link to singleton instance as it will be often used
        if create_folder and not os.path.isdir(self.cache_path):
            try:
                os.makedirs(self.cache_path)
            except OSError:
                # Best-effort: log and continue; later preview calls will fail if unusable
                self.logger.error("cant create cache folder [{}]".format(self.cache_path))
    def get_preview_context(self, file_path: str, file_ext: str) -> PreviewContext:
        """Build a PreviewContext (mimetype, builder, cache hash, file lock) for the given file."""
        return PreviewContext(self._factory, self.cache_path, file_path, file_ext)
def get_mimetype(self, file_path: str, file_ext: str = "") -> str:
"""
Return detected mimetype of the file
:param file_path: path of the file
:param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
it's usefull if the extension can't be found in file_path
:return: mimetype of the file
"""
return (
PreviewBuilderFactory().get_instance().get_file_mimetype(file_path, file_ext)
) # nopep8
    def has_pdf_preview(self, file_path: str, file_ext: str = "") -> bool:
        """
        Return True if the given file offers a PDF preview
        (e.g. office documents).
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        """
        return self.get_preview_context(file_path, file_ext).builder.has_pdf_preview()
    def has_jpeg_preview(self, file_path: str, file_ext: str = "") -> bool:
        """
        Return True if the given file offers a JPEG preview
        (e.g. office documents, images).
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        """
        return self.get_preview_context(file_path, file_ext).builder.has_jpeg_preview()
    def has_text_preview(self, file_path: str, file_ext: str = "") -> bool:
        """
        Return True if the given file offers a text preview
        (e.g. text files and archives).
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        """
        return self.get_preview_context(file_path, file_ext).builder.has_text_preview()
    def has_json_preview(self, file_path: str, file_ext: str = "") -> bool:
        """
        Return True if the given file offers a JSON (metadata) preview
        (most types, via exiftool).
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        """
        return self.get_preview_context(file_path, file_ext).builder.has_json_preview()
    def has_html_preview(self, file_path: str, file_ext: str = "") -> bool:
        """
        Return True if the given file offers an HTML preview
        (e.g. archive files).
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        """
        return self.get_preview_context(file_path, file_ext).builder.has_html_preview()
    def get_page_nb(self, file_path: str, file_ext: str = "") -> int:
        """
        Return the page number of the given file.
        :param file_path: path of the file
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        :return: number of pages. Default is 1 (eg for a JPEG)
        """
        preview_context = self.get_preview_context(file_path, file_ext)
        # INFO - G.M - 2021-04-29 deal with pivot format
        # jpeg preview from pdf for libreoffice/scribus
        # - change original file to use to pivot file (pdf preview) of the content instead of the
        # original file
        # - use preview context of this pivot pdf file.
        if isinstance(preview_context.builder, DocumentPreviewBuilder):
            file_path = self.get_pdf_preview(file_path=file_path, force=False)
            preview_context = self.get_preview_context(file_path, file_ext=".pdf")
        # Lock per cache-hash so concurrent preview work on the same file is serialized
        with preview_context.filelock:
            return preview_context.builder.get_page_number(
                file_path, preview_context.hash, self.cache_path, preview_context.mimetype
            )
    def get_jpeg_preview(
        self,
        file_path: str,
        page: int = -1,
        width: int = None,
        height: int = 256,
        force: bool = False,
        file_ext: str = "",
        dry_run: bool = False,
    ) -> str:
        """
        Return a JPEG preview of given file, according to parameters
        :param file_path: path of the file to preview
        :param page: page of the original document, if it makes sense
            (-1 means "first page")
        :param width: width of the requested preview image
            (defaults to *height*, giving a square bounding box)
        :param height: height of the requested preview image
        :param force: if True, do not use cached preview.
        :param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
        it's useful if the extension can't be found in file_path
        :param dry_run: Don't actually generate the file, but return its path as
        if we had
        :return: path to the generated preview file
        """
        preview_context = self.get_preview_context(file_path, file_ext)
        if width is None:
            width = height
        size = ImgDims(width=width, height=height)
        extension = ".jpeg"
        preview_name = self._get_preview_name(preview_context.hash, size, page)
        preview_file_path = os.path.join(self.cache_path, preview_name + extension) # nopep8
        if dry_run:
            return preview_file_path
        # INFO - G.M - 2021-04-29 deal with pivot format
        # jpeg preview from pdf for libreoffice/scribus
        # - change original file to use to pivot file (pdf preview) of the content instead of the
        # original file
        # - use preview context of this pivot pdf file.
        if isinstance(preview_context.builder, DocumentPreviewBuilder):
            file_path = self.get_pdf_preview(file_path=file_path, force=force)
            preview_context = self.get_preview_context(file_path, file_ext=".pdf")
        # Lock per cache-hash so concurrent generation for the same file is serialized
        with preview_context.filelock:
            if force or not os.path.exists(preview_file_path):
                preview_context.builder.build_jpeg_preview(
                    file_path=file_path,
                    preview_name=preview_name,
                    cache_path=self.cache_path,
                    page_id=max(page, 0), # if page is -1 then return preview of first page,
                    extension=extension,
                    size=size,
                    mimetype=preview_context.mimetype,
                )
        return preview_file_path
def get_pdf_preview(
self,
file_path: str,
page: int = -1,
force: bool = False,
file_ext: str = "",
dry_run: bool = False,
) -> str:
"""
Return a PDF preview of given file, according to parameters
:param file_path: path of the file to preview
:param page: page of the original document. -1 means "all pages"
:param force: if True, do not use cached preview.
:param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
it's usefull if the extension can't be found in file_path
:param dry_run: Don't actually generate the file, but return its path as
if we had
:return: path to the generated preview file
"""
preview_context = self.get_preview_context(file_path, file_ext)
extension = ".pdf"
preview_name = self._get_preview_name(filehash=preview_context.hash, page=page)
try:
cache_file_path = self.cache_path + preview_name + extension
if dry_run:
return cache_file_path
with preview_context.filelock:
if force or not os.path.exists(cache_file_path):
preview_context.builder.build_pdf_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=self.cache_path,
extension=extension,
page_id=page,
mimetype=preview_context.mimetype,
)
return cache_file_path
except AttributeError:
raise Exception("Error while getting the file the file preview")
def get_text_preview(
self, file_path: str, force: bool = False, file_ext: str = "", dry_run: bool = False
) -> str:
"""
Return a TXT preview of given file, according to parameters
:param file_path: path of the file to preview
:param force: if True, do not use cached preview.
:param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
it's usefull if the extension can't be found in file_path
:param dry_run: Don't actually generate the file, but return its path as
if we had
:return: path to the generated preview file
"""
preview_context = self.get_preview_context(file_path, file_ext)
extension = ".txt"
preview_name = self._get_preview_name(filehash=preview_context.hash)
try:
cache_file_path = self.cache_path + preview_name + extension
if dry_run:
return cache_file_path
with preview_context.filelock:
if force or not os.path.exists(cache_file_path):
preview_context.builder.build_text_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=self.cache_path,
extension=extension,
)
return cache_file_path
except AttributeError:
raise Exception("Error while getting the file the file preview")
def get_html_preview(
self, file_path: str, force: bool = False, file_ext: str = "", dry_run: bool = False
) -> str:
"""
Return a HTML preview of given file, according to parameters
:param file_path: path of the file to preview
:param force: if True, do not use cached preview.
:param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
it's usefull if the extension can't be found in file_path
:param dry_run: Don't actually generate the file, but return its path as
if we had
:return: path to the generated preview file
"""
preview_context = self.get_preview_context(file_path, file_ext)
extension = ".html"
preview_name = self._get_preview_name(filehash=preview_context.hash)
try:
cache_file_path = self.cache_path + preview_name + extension
if dry_run:
return cache_file_path
with preview_context.filelock:
if force or not os.path.exists(cache_file_path):
preview_context.builder.build_html_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=self.cache_path,
extension=extension,
)
return cache_file_path
except AttributeError:
raise Exception("Error while getting the file the file preview")
def get_json_preview(
self, file_path: str, force: bool = False, file_ext: str = "", dry_run: bool = False
) -> str:
"""
Return a JSON preview of given file, according to parameters
:param file_path: path of the file to preview
:param force: if True, do not use cached preview.
:param file_ext: extension associated to the file. Eg 'jpg'. May be empty -
it's usefull if the extension can't be found in file_path
:param dry_run: Don't actually generate the file, but return its path as
if we had
:return: path to the generated preview file
"""
preview_context = self.get_preview_context(file_path, file_ext)
extension = ".json"
preview_name = self._get_preview_name(filehash=preview_context.hash)
try:
cache_file_path = self.cache_path + preview_name + extension
if dry_run:
return cache_file_path
with preview_context.filelock:
if force or not os.path.exists(cache_file_path): # nopep8
preview_context.builder.build_json_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=self.cache_path,
extension=extension,
)
return cache_file_path
except AttributeError:
raise Exception("Error while getting the file preview")
def _get_preview_name(self, filehash: str, size: ImgDims = None, page: int = None) -> str:
"""
Build a hash based on the given parameters.
This hash will be used as key for caching generated previews.
The hash is something like this:
720f89890597ec1eb45e7b775898e806-320x139-page32
:param hash: hash of the original file
:param size: requested size (width and height)
:param page: requested page
:return:
"""
page_str = ""
if page is not None and page > -1:
page_str = "-page{page}".format(page=page)
size_str = ""
if size:
size_str = "-{width}x{height}".format(width=size.width, height=size.height)
return "{hash}{size}{page}".format(hash=filehash, size=size_str, page=page_str)
def get_supported_mimetypes(self) -> typing.List[str]:
return self._factory.get_supported_mimetypes()
def get_file_extension(self, mime: str) -> typing.Optional[str]:
"""
Get one valid file extension related to the given mimetype.
"""
return mimetypes_storage.guess_extension(mime, strict=False)
def get_file_extensions(self, mime: str) -> typing.List[str]:
"""
get all valid file extensions for one the given mimetype
"""
return mimetypes_storage.guess_all_extensions(mime, strict=False)
def get_supported_file_extensions(self) -> typing.List[str]:
"""
Get all supported file_extension by preview_generator
:return:
"""
supported_file_extensions = []
for mime in self.get_supported_mimetypes():
extensions = mimetypes_storage.guess_all_extensions(mime, strict=False)
supported_file_extensions.extend(extensions)
return supported_file_extensions
| 43.28392 | 127 | 0.627794 |
79580d7c895db17902075de5a4c88f830086e20e | 6,429 | py | Python | test/functional/rpc_bind.py | minblock/daipercoins | f01d6c65daf8fdf236cc4b40245260539406bb0f | [
"MIT"
] | null | null | null | test/functional/rpc_bind.py | minblock/daipercoins | f01d6c65daf8fdf236cc4b40245260539406bb0f | [
"MIT"
] | null | null | null | test/functional/rpc_bind.py | minblock/daipercoins | f01d6c65daf8fdf236cc4b40245260539406bb0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running daipercoinsd with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| 49.453846 | 172 | 0.63338 |
79580e131f0ef715b8b1b25429524a1ef4eb679a | 2,140 | py | Python | clusterisation/clusterisation.py | MADE-realtime/realtime_news | 0a63687cad0ccefd772b2c28d7138d8db52d5f20 | [
"MIT"
] | null | null | null | clusterisation/clusterisation.py | MADE-realtime/realtime_news | 0a63687cad0ccefd772b2c28d7138d8db52d5f20 | [
"MIT"
] | null | null | null | clusterisation/clusterisation.py | MADE-realtime/realtime_news | 0a63687cad0ccefd772b2c28d7138d8db52d5f20 | [
"MIT"
] | null | null | null | from typing import List
from sklearn.cluster import AgglomerativeClustering
import fasttext.util
import numpy as np
from datetime import date
from db_lib.crud import get_news_by_filters
from config import LANGUAGE_SHORT_FOR_FASTTEXT, LIMIT_NEWS
from db_lib.models import News
from db_lib.database import SessionLocal
from sqlalchemy.orm import Session
import click
import numpy
def download_language_model():
fasttext.util.download_model(LANGUAGE_SHORT_FOR_FASTTEXT, if_exists='ignore')
model = fasttext.load_model(f'cc.{LANGUAGE_SHORT_FOR_FASTTEXT}.300.bin')
return model
def clean_nones_from_content(news_list: List[News]) -> List[News]:
for i, news in enumerate(news_list):
if news.content is None:
news_list[i].content = news.title
return news_list
def cluster_news_content(news_list: List[News]) -> np.ndarray:
if not news_list:
return numpy.array(news_list)
model = download_language_model()
clusterer = AgglomerativeClustering(
n_clusters=None,
affinity='cosine',
linkage='complete',
distance_threshold=0.25,
)
# news_list = clean_nones_from_content(news_list)
# content_emb = [model[one_news.content] for one_news in news_list]
title_emb = [model[one_news.title] for one_news in news_list]
clusters = clusterer.fit_predict(title_emb)
return clusters
@click.command()
@click.option("--start_date", type=click.DateTime(formats=["%Y-%m-%d"]),
default=str(date.today()))
@click.option("--end_date", type=click.DateTime(formats=["%Y-%m-%d"]),
default=str(date.today()))
def cluster_messages(start_date: date, end_date: date, db: Session = SessionLocal()):
"""Загружаем все сообщения (пока сообщений немного) и кластеризуем их с помощью кластеризатора"""
news_list = get_news_by_filters(db, topic=None, start_date=start_date, end_date=end_date, limit=LIMIT_NEWS)
cluster_num = cluster_news_content(news_list)
for i in range(len(news_list)):
news_list[i].cluster_num = cluster_num[i].item()
db.commit()
if __name__ == '__main__':
cluster_messages() | 32.923077 | 111 | 0.728505 |
79580e41c75970ea3cb2ae1647ebb00adb19251b | 455 | py | Python | tests/test_utils/test_slot_inheritence.py | krishna-saravan/linkml | 8c34844ebaf054f44ceb386e4d51ee4c95dbebe6 | [
"CC0-1.0"
] | 83 | 2021-03-17T16:31:02.000Z | 2022-03-13T23:17:02.000Z | tests/test_utils/test_slot_inheritence.py | krishna-saravan/linkml | 8c34844ebaf054f44ceb386e4d51ee4c95dbebe6 | [
"CC0-1.0"
] | 390 | 2021-03-18T18:44:11.000Z | 2022-03-30T22:55:01.000Z | tests/test_utils/test_slot_inheritence.py | krishna-saravan/linkml | 8c34844ebaf054f44ceb386e4d51ee4c95dbebe6 | [
"CC0-1.0"
] | 20 | 2021-03-27T08:55:56.000Z | 2022-02-24T15:25:57.000Z | import unittest
from linkml.utils.schemaloader import SchemaLoader
from tests.test_utils.environment import env
class InheritedSlotTestCase(unittest.TestCase):
def test_inherited_slot(self):
""" Validate default slot range settings """
schema = SchemaLoader(env.input_path('inherited_slots.yaml')).resolve()
self.assertTrue('same as' in schema.classes['named thing'].slots)
if __name__ == '__main__':
unittest.main()
| 26.764706 | 79 | 0.736264 |
79580fddc7c126487506b1678e34e6b85a44f66d | 1,692 | py | Python | insert_sort.py | brightmaraba/mandelbrot | 02e07806d1fdeef6e003fac872d6f90a961b1167 | [
"MIT"
] | null | null | null | insert_sort.py | brightmaraba/mandelbrot | 02e07806d1fdeef6e003fac872d6f90a961b1167 | [
"MIT"
] | null | null | null | insert_sort.py | brightmaraba/mandelbrot | 02e07806d1fdeef6e003fac872d6f90a961b1167 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib as mp
import numpy as np
import random
# Set graph style
plt.style.use('fivethirtyeight')
# Create array and shuffle it
n = int(input("Enter array size\n"))
a = [i for i in range(1, n+1)]
random.shuffle(a)
# Define insertion sort
def insertion_sort(a):
for k in range(1, len(a)):
key = a[k]
v = k - 1
while(v >=0 and a[v] > key):
a[v+1] = a[v]
v -= 1
# Yield current position of elements in a
yield a
a[v+1] = key
yield a
# Generator object returned by insert_sort
generator = insertion_sort(a)
# Set colors of bar
data_normalizer = mp.colors.Normalize()
color_map = mp.colors.LinearSegmentedColormap(
"my_map",
{
"red": [(0, 1.0, 1.0),
(1.0, .5, .5)],
"green": [(0, 0.5, 0.5),
(1.0, 0, 0)],
"blue": [(0, 0.50, 0.5),
(1.0, 0, 0)]
}
)
fig, ax = plt.subplots()
# Bar container
rects = ax.bar(range(len(a)), a, align="edge",
color=color_map(data_normalizer(range(n))))
# Set view limit
ax.set_xlim(0, len(a))
ax.set_ylim(0, int(1.1*len(a)))
# Text to be displayed
text = ax.text(0.01, 0.95, "", transform=ax.transAxes)
iteration = [0]
# Animate
def animate(A, rects, iteration):
for rect, val in zip(rects, A):
rect.set_height(val)
iteration[0] += 1
text.set_text("iterations : {}".format(iteration[0]))
anim = FuncAnimation(fig, func=animate,
fargs=(rects, iteration), frames=generator, interval=550,
repeat=False)
plt.show()
| 22.263158 | 77 | 0.575059 |
795810c53da4be12b06d1119f84007a3e9551576 | 5,452 | py | Python | module/error.py | FaberSid/mmo-discord-bot | 9004c0194428819c6f3faba547b3fca47d57cfe9 | [
"MIT"
] | 3 | 2020-05-20T10:12:55.000Z | 2020-11-16T14:58:29.000Z | module/error.py | FaberSid/mmo-discord-bot | 9004c0194428819c6f3faba547b3fca47d57cfe9 | [
"MIT"
] | 4 | 2021-04-05T06:14:51.000Z | 2021-06-14T06:22:05.000Z | module/error.py | FaberSid/mmo-discord-bot | 9004c0194428819c6f3faba547b3fca47d57cfe9 | [
"MIT"
] | 1 | 2021-05-16T11:55:39.000Z | 2021-05-16T11:55:39.000Z | import getpass
import hashlib
import math
import traceback
import aiohttp
from discord import (AsyncWebhookAdapter, Embed, File, RawReactionActionEvent,
Webhook)
from discord.ext import commands as c
from module import item, status
async def _webhook(all_error, url, ctx):
for i in range(len(all_error)):
while len("".join(all_error[i:i+2])) < 1800 and len("".join(all_error[i+1:])) != 0:
all_error[i:i+2] = ["".join(all_error[i:i+2])]
async with aiohttp.ClientSession() as session:
w = Webhook.from_url(url, adapter=AsyncWebhookAdapter(session))
for i in range(0, len(all_error), 3):
await w.send(file=File("variables.txt"), content=f"```py\n! ERROR:{ctx.author} ID:{ctx.author.id}\n! 鯖名:{ctx.guild} チャンネル名:{ctx.channel}\nBOTか否か:{ctx.author.bot}```", embeds=[Embed(title="TAO内部のError情報:", description=f"```py\n{y.replace('`', '')}```").set_footer(text=f"{i + x + 1}/{len(all_error)}") for x, y in enumerate(all_error[i:i + 3])])
class Cog(c.Cog):
def __init__(self, bot):
self.bot = bot
@c.Cog.listener()
async def on_command_error(self, ctx, error):
if not __debug__:
if any([isinstance(error, i) for i in [c.CommandInvokeError, c.CommandNotFound, c.BadArgument, c.UnexpectedQuoteError, c.ExpectedClosingQuoteError, c.InvalidEndOfQuotedStringError]]):
traceback.print_exception(type(error), error, error.__traceback__)
print(error.args)
return
elif isinstance(error, c.DisabledCommand):
await ctx.send(embed=Embed(description="実行したコマンドは開発中か諸事情により開発者が無効化しています"))
return
l_error = traceback.format_exception(
type(error), error, error.__traceback__)
l_error = [x.replace(f"\\{getpass.getuser()}\\", "\\*\\")
for x in l_error if "site-packages" not in x]
webhook = await self.bot.fetch_webhook(712268338189041730)
cnt = None
hash_error = hashlib.sha512(
bytes("".join(l_error), 'shift-jis')).hexdigest()
async for message in webhook.channel.history(limit=None):
if message.embeds:
if message.embeds[0].footer.text == hash_error and message.embeds[0].author.name:
if cnt is None:
cnt = 1 + int(message.embeds[0].author.name[:-8])
await message.delete()
cnt = cnt or 1
def is_limit(embeds, description=""):
"""FIELD LIMIT
title 256 characters
description 2048 characters
fields Up to 25 field objects
field.name 256 characters
field.value 1024 characters
footer.text 2048 characters
author.name 256 characters
Additionally, the characters in all title, description, field.name, field.value, footer.text, and author.name fields must not exceed 6000"""
if len(embeds) == 0:
embeds += [Embed(description=description)]
return embeds, ""
elif 9*sum([bool(e.description) for e in embeds])+sum(map(len, sum([[e.title, e.description, e.footer.text, e.author.name, *sum([[i.name, i.value] for i in e.fields], []), description] for e in embeds], []))) > 6000:
return embeds, description
elif len(embeds[-1].description)+len(description) <= 2048-9:
if embeds[-1].description:
embeds[-1].description += description
else:
embeds[-1].description = description
return embeds, ""
elif len(embeds) < 10:
embeds += [Embed(description=description)]
return embeds, ""
else:
return embeds, description
top = Embed(title="{}: {}".format(type(error).__name__, error)[:256]).set_author(
name=f"{cnt}回目のエラーです").set_footer(text=hash_error)
l_embeds = [[top.copy()]]
description = ""
l_error += ["\n#END Traceback```\n**発生場所**\nGuild:{}(ID:{})\nchannel:{}(ID:{})\nuser:{}(ID:{})\nLink:[ここ]({})\n```escape".format(
ctx.guild.name, ctx.guild.id, ctx.channel.name, ctx.channel.id, ctx.author.name, ctx.author.id, ctx.message.jump_url)]
while l_error:
if not description:
description = l_error.pop(0)
l_embeds[-1], description = is_limit(l_embeds[-1], description)
if description:
l_embeds += [[top.copy()]]
for i in l_embeds:
for j in i:
j.description = "```py\n"+j.description+"```"
for i, embeds in enumerate(l_embeds):
await webhook.send(None if i else "<@&599220739815505941>修正よろしくね!", embeds=embeds, wait=True)
if cnt == 1:
item.obtain_an_item(ctx.author.id, -8)
exp = math.ceil(status.get_player_level(ctx.author.id) / 7)
first_err_msg = "\n\nあ、またバグが見つかったんだ。\nしかも今までにないエラーか\n<@{}>は{}の経験値と{}を得た。\n{}".format(
ctx.author.id, exp, item.items.get("-8", {"name": "unknown"})["name"], status.experiment(ctx.author.id, exp))
else:
first_err_msg = ""
await ctx.send(embed=Embed(title="エラーが発生しました", description="発生したエラーは開発者が調査中です"+first_err_msg).set_footer(text="hash: "+hash_error))
def setup(bot):
bot.add_cog(Cog(bot))
| 49.117117 | 362 | 0.588408 |
795810df92887bf6896548fd63a44f5c1c3c135e | 21,478 | py | Python | codes/SRN/models/DASR_model.py | madin162/AI604_project | d17b7a062bd1d55367136d15fabef64664d328b6 | [
"MIT"
] | null | null | null | codes/SRN/models/DASR_model.py | madin162/AI604_project | d17b7a062bd1d55367136d15fabef64664d328b6 | [
"MIT"
] | null | null | null | codes/SRN/models/DASR_model.py | madin162/AI604_project | d17b7a062bd1d55367136d15fabef64664d328b6 | [
"MIT"
] | 1 | 2021-11-24T05:41:29.000Z | 2021-11-24T05:41:29.000Z | import sys
import os
import cv2
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import lr_scheduler
from utils.util import forward_chop
import models.networks as networks
from .base_model import BaseModel
from models.modules.loss import GANLoss, GradientPenaltyLoss, PerceptualLoss
logger = logging.getLogger('base')
from pytorch_wavelets import DWTForward, DWTInverse
from utils.util import b_split, b_merge
from models.modules.architecture import FilterHigh, FilterLow
from PerceptualSimilarity.models.util import PerceptualLoss as val_lpips
import utils.util as util
from PerceptualSimilarity.util import util as util_LPIPS
class DASR_Model(BaseModel):
    def __init__(self, opt):
        """Build the DASR model: networks, frequency separation, losses, optimizers.

        Args:
            opt (dict): parsed option dictionary. Reads top-level keys
                ('chop', 'scale', 'val_lpips', 'adaptive_weights',
                'multiweights') plus the nested 'train' section.
        """
        super(DASR_Model, self).__init__(opt)
        train_opt = opt['train']
        self.chop = opt['chop']  # tile-based forward at test time (see test())
        self.scale = opt['scale']  # SR upscale factor
        self.val_lpips = opt['val_lpips']  # compute LPIPS during validation
        self.adaptive_weights = opt['adaptive_weights']  # NOTE(review): stored but not read in this excerpt — confirm usage
        self.multiweights = opt['multiweights']  # per-pixel weighted pixel loss (see optimize_parameters)
        # GD gan loss
        self.ragan = train_opt['ragan']  # use relativistic-average GAN formulation
        self.cri_gan = GANLoss(train_opt['gan_type'], 1.0, 0.0).to(self.device)
        self.l_gan_H_target_w = train_opt['gan_H_target']  # weight: target-domain (unpaired) HF GAN loss
        self.l_gan_H_source_w = train_opt['gan_H_source']  # weight: source-domain (paired) HF GAN loss
        # define networks and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)  # G
        if self.is_train:
            # Discriminators are only built when their loss weight is > 0.
            if self.l_gan_H_target_w > 0:
                self.netD_target = networks.define_D(opt).to(self.device)  # D
                self.netD_target.train()
            if self.l_gan_H_source_w > 0:
                self.netD_source = networks.define_pairD(opt).to(self.device)  # D
                self.netD_source.train()
            self.netG.train()
        self.load()  # load G and D if needed
        # Frequency Separation
        # self.fs splits an image into low-frequency and high-frequency parts;
        # self.filter_high is also used standalone in get_current_visuals().
        self.norm = train_opt['norm']
        if train_opt['fs'] == 'wavelet':
            # Wavelet
            self.DWT2 = DWTForward(J=1, mode='reflect', wave='haar').to(self.device)
            self.fs = self.wavelet_s  # NOTE(review): wavelet_s defined outside this excerpt
            self.filter_high = FilterHigh(kernel_size=train_opt['fs_kernel_size'], gaussian=True).to(self.device)
        elif train_opt['fs'] == 'gau':
            # Gaussian
            self.filter_low, self.filter_high = FilterLow(kernel_size=train_opt['fs_kernel_size'], gaussian=True).to(self.device), \
                                                FilterHigh(kernel_size=train_opt['fs_kernel_size'], gaussian=True).to(self.device)
            self.fs = self.filter_func  # NOTE(review): filter_func defined outside this excerpt
        elif train_opt['fs'] == 'avgpool':
            # avgpool
            self.filter_low, self.filter_high = FilterLow(kernel_size=train_opt['fs_kernel_size']).to(self.device), \
                                                FilterHigh(kernel_size=train_opt['fs_kernel_size']).to(self.device)
            self.fs = self.filter_func
        else:
            raise NotImplementedError('FS type [{:s}] not recognized.'.format(train_opt['fs']))
        # define losses, optimizer and scheduler
        if self.is_train:
            # G pixel loss
            if train_opt['pixel_weight'] > 0:
                l_pix_type = train_opt['pixel_criterion']
                if l_pix_type == 'l1':
                    self.cri_pix = nn.L1Loss().to(self.device)
                elif l_pix_type == 'l2':
                    self.cri_pix = nn.MSELoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))
                self.l_pix_w = train_opt['pixel_weight']
                self.l_pix_LL_w = train_opt['pixel_LL_weight']  # weight for low-frequency (LL) supervision
                self.sup_LL = train_opt['sup_LL']  # NOTE: only assigned when pixel loss is enabled
            else:
                logger.info('Remove pixel loss.')
                self.cri_pix = None
            self.l_fea_type = train_opt['feature_criterion']
            # G feature loss
            if train_opt['feature_weight'] > 0:
                if self.l_fea_type == 'l1':
                    self.cri_fea = nn.L1Loss().to(self.device)
                elif self.l_fea_type == 'l2':
                    self.cri_fea = nn.MSELoss().to(self.device)
                elif self.l_fea_type == 'LPIPS':
                    self.cri_fea = PerceptualLoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(self.l_fea_type))
                self.l_fea_w = train_opt['feature_weight']
            else:
                logger.info('Remove feature loss.')
                self.cri_fea = None
            if self.cri_fea and self.l_fea_type in ['l1', 'l2']:  # load VGG perceptual loss
                self.netF = networks.define_F(opt, use_bn=False).to(self.device)
            # D_update_ratio and D_init_iters are for WGAN
            self.G_update_inter = train_opt['G_update_inter']
            self.D_update_inter = train_opt['D_update_inter']
            self.D_update_ratio = train_opt['D_update_ratio'] if train_opt['D_update_ratio'] else 1
            self.D_init_iters = train_opt['D_init_iters'] if train_opt['D_init_iters'] else 0
            if train_opt['gan_type'] == 'wgan-gp':
                self.random_pt = torch.Tensor(1, 1, 1, 1).to(self.device)
                # gradient penalty loss
                self.cri_gp = GradientPenaltyLoss(device=self.device).to(self.device)
                self.l_gp_w = train_opt['gp_weigth']  # NOTE(review): config key spelled 'gp_weigth' — keep configs in sync
            # optimizers
            # G
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
            optim_params = []
            for k, v in self.netG.named_parameters():  # can optimize for a part of the model
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    logger.warning('Params [{:s}] will not optimize.'.format(k))
            self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'], \
                weight_decay=wd_G, betas=(train_opt['beta1_G'], 0.999))
            self.optimizers.append(self.optimizer_G)
            # D
            if self.l_gan_H_target_w > 0:
                wd_D = train_opt['weight_decay_D'] if train_opt['weight_decay_D'] else 0
                self.optimizer_D_target = torch.optim.Adam(self.netD_target.parameters(), lr=train_opt['lr_D'], \
                    weight_decay=wd_D, betas=(train_opt['beta1_D'], 0.999))
                self.optimizers.append(self.optimizer_D_target)
            if self.l_gan_H_source_w > 0:
                wd_D = train_opt['weight_decay_D'] if train_opt['weight_decay_D'] else 0
                self.optimizer_D_source = torch.optim.Adam(self.netD_source.parameters(), lr=train_opt['lr_D'], \
                    weight_decay=wd_D, betas=(train_opt['beta1_D'], 0.999))
                self.optimizers.append(self.optimizer_D_source)
            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError('MultiStepLR learning rate scheme is enough.')
            self.log_dict = OrderedDict()
        # print network
        self.print_network()
        # # Debug
        if self.val_lpips:
            self.cri_fea_lpips = val_lpips(model='net-lin', net='alex').to(self.device)
def feed_data(self, data, istrain):
# LR
if istrain and 'HR' in data: # train or val
HR_pair = data['HR'].to(self.device)
HR_unpair = data['HR_unpair'].to(self.device)
fake_w = data['fake_w'].to(self.device)
real_LR = data['LR_real'].to(self.device)
fake_LR = data['LR_fake'].to(self.device)
self.var_L = torch.cat([fake_LR, real_LR], dim=0)
self.var_H = torch.cat([HR_pair, HR_unpair], dim=0)
self.weights = fake_w
self.weights = F.interpolate(self.weights, size=(HR_pair.shape[2], HR_pair.shape[3]),
mode='bilinear', align_corners=False)
self.mask = []
B = self.var_L.shape[0]
self.mask += [0] * (B // 2)
self.mask += [1] * (B - B//2)
else:
self.var_L = data['LR'].to(self.device)
if 'HR' in data:
self.var_H = data['HR'].to(self.device)
self.needHR = True
else:
self.needHR = False
def optimize_parameters(self, step):
# G
self.fake_H = self.netG(self.var_L)
self.fake_LL, self.fake_Hc = self.fs(self.fake_H, norm=self.norm)
self.real_LL, self.real_Hc = self.fs(self.var_H, norm=self.norm)
# Splitting data
# Fake data
self.fake_SR_source, _ = b_split(self.fake_H, self.mask)
self.fake_SR_LL_source, _ = b_split(self.fake_LL, self.mask)
self.fake_SR_Hf_source, self.fake_SR_Hf_target = b_split(self.fake_Hc, self.mask)
# Real data
self.real_HR_source, _ = b_split(self.var_H, self.mask)
self.real_HR_LL_source, _ = b_split(self.real_LL, self.mask)
self.real_HR_Hf_source, self.real_HR_Hf_target = b_split(self.real_Hc, self.mask)
if step % self.G_update_inter == 0:
l_g_total = 0
if self.cri_pix: # pixel loss
if self.multiweights:
l_g_pix = self.l_pix_w * \
torch.mean(self.weights * torch.abs(self.fake_SR_source - self.real_HR_source))
else:
l_g_pix = self.cri_pix(self.fake_SR_source, self.real_HR_source)
l_g_total += self.l_pix_w * l_g_pix
if self.sup_LL:
l_g_LL_pix = self.cri_pix(self.fake_SR_LL_source, self.real_HR_LL_source)
l_g_total += self.l_pix_LL_w * l_g_LL_pix
if self.l_fea_type in ['l1', 'l2'] and self.cri_fea: # feature loss
real_fea = self.netF(self.real_HR_source).detach()
fake_fea = self.netF(self.fake_SR_source)
l_g_fea = self.cri_fea(fake_fea, real_fea)
l_g_total += self.l_fea_w * l_g_fea
elif self.l_fea_type == 'LPIPS' and self.cri_fea:
l_g_fea = self.cri_fea(self.fake_SR_source, self.real_HR_source)
l_g_total += self.l_fea_w * l_g_fea
# G gan target loss
if self.l_gan_H_target_w > 0:
pred_g_Hf_target_fake = self.netD_target(self.fake_SR_Hf_target)
if self.ragan:
pred_g_Hf_target_real = self.netD_target(self.real_HR_Hf_target).detach()
l_g_gan_target_Hf = self.l_gan_H_target_w * \
(self.cri_gan(pred_g_Hf_target_fake - pred_g_Hf_target_real.mean(0, keepdim=True), True) +
self.cri_gan(pred_g_Hf_target_real - pred_g_Hf_target_fake.mean(0, keepdim=True), False)) / 2
else:
l_g_gan_target_Hf = self.cri_gan(pred_g_Hf_target_fake, True)
l_g_total += self.l_gan_H_target_w * l_g_gan_target_Hf
# G_gan_source_loss
if self.l_gan_H_source_w > 0:
pred_g_Hf_source_fake = self.netD_source(self.fake_SR_Hf_source)
if self.ragan:
pred_g_Hf_source_real = self.netD_source(self.real_HR_Hf_source).detach()
l_g_gan_source_Hf = self.l_gan_H_source_w * \
(self.cri_gan(pred_g_Hf_source_fake - pred_g_Hf_source_real.mean(0, keepdim=True), True) +
self.cri_gan(pred_g_Hf_source_real - pred_g_Hf_source_fake.mean(0, keepdim=True), False)) / 2
else:
l_g_gan_source_Hf = self.l_gan_H_source_w * self.cri_gan(pred_g_Hf_source_fake, True)
l_g_total += l_g_gan_source_Hf
self.optimizer_G.zero_grad()
l_g_total.backward()
self.optimizer_G.step()
# D
if step % self.D_update_inter == 0:
# target domain
if self.l_gan_H_target_w > 0:
pred_d_target_real = self.netD_target(self.real_HR_Hf_target.detach())
pred_d_target_fake = self.netD_target(self.fake_SR_Hf_target.detach()) # detach to avoid BP to G
if self.ragan:
l_d_target_real = self.cri_gan(pred_d_target_real - pred_d_target_fake.mean(0, keepdim=True), True)
l_d_target_fake = self.cri_gan(pred_d_target_fake - pred_d_target_real.mean(0, keepdim=True), False)
else:
l_d_target_real = self.cri_gan(pred_d_target_real, True)
l_d_target_fake = self.cri_gan(pred_d_target_fake, False)
l_d_target_total = (l_d_target_real + l_d_target_fake) / 2
self.optimizer_D_target.zero_grad()
l_d_target_total.backward()
self.optimizer_D_target.step()
# source domain
if self.l_gan_H_source_w > 0:
pred_d_source_real = self.netD_source(self.real_HR_Hf_source.detach())
pred_d_source_fake = self.netD_source(self.fake_SR_Hf_source.detach()) # detach to avoid BP to G
if self.ragan:
l_d_source_real = self.cri_gan(pred_d_source_real - pred_d_source_fake.mean(0, keepdim=True), True)
l_d_source_fake = self.cri_gan(pred_d_source_fake - pred_d_source_real.mean(0, keepdim=True), False)
else:
l_d_source_real = self.cri_gan(pred_d_source_real, True)
l_d_source_fake = self.cri_gan(pred_d_source_fake, False)
l_d_source_total = (l_d_source_fake + l_d_source_real) / 2
self.optimizer_D_source.zero_grad()
l_d_source_total.backward()
self.optimizer_D_source.step()
# set log
if step % self.G_update_inter == 0:
# G
if self.cri_pix:
self.log_dict['loss/l_g_pix'] = l_g_pix.item()
if self.sup_LL:
self.log_dict['loss/l_g_LL_pix'] = l_g_LL_pix.item()
if self.cri_fea:
self.log_dict['loss/l_g_fea'] = l_g_fea.item()
if self.l_gan_H_target_w > 0:
self.log_dict['loss/l_g_gan_target_Hf'] = l_g_gan_target_Hf.item()
if self.l_gan_H_source_w > 0:
self.log_dict['loss/l_g_gan_source_H'] = l_g_gan_source_Hf.item()
# if self.opt['train']['gan_type'] == 'wgan-gp':
# self.log_dict['l_d_gp'] = l_d_gp.item()
# D outputs
if step % self.D_update_inter == 0:
if self.l_gan_H_target_w > 0:
self.log_dict['loss/l_d_target_total'] = l_d_target_total.item()
self.log_dict['disc_Score/D_real_target_H'] = torch.mean(pred_d_target_real.detach()).item()
self.log_dict['disc_Score/D_fake_target_H'] = torch.mean(pred_d_target_fake.detach()).item()
if self.l_gan_H_source_w > 0:
self.log_dict['loss/l_d_total'] = l_d_source_total.item()
self.log_dict['disc_Score/D_real_source_H'] = torch.mean(pred_d_source_real.detach()).item()
self.log_dict['disc_Score/D_fake_source_H'] = torch.mean(pred_d_source_fake.detach()).item()
def test(self, tsamples=False):
self.netG.eval()
with torch.no_grad():
if self.chop:
self.fake_H = forward_chop(self.var_L, self.scale, self.netG, min_size=320000)
else:
self.fake_H = self.netG(self.var_L)
if not tsamples and self.val_lpips:
fake_H, real_H = util.tensor2img(self.fake_H), util.tensor2img(self.var_H)
fake_H, real_H = fake_H[:, :, [2, 1, 0]], real_H[:, :, [2, 1, 0]]
fake_H, real_H = util_LPIPS.im2tensor(fake_H), util_LPIPS.im2tensor(real_H)
self.LPIPS = self.cri_fea_lpips(fake_H, real_H)[0][0][0][0]
self.netG.train()
def get_current_log(self):
return self.log_dict
def get_current_visuals(self, need_HR=True, tsamples=False):
out_dict = OrderedDict()
out_dict['LR'] = self.var_L.detach()[0].float().cpu()
if tsamples:
out_dict['hf'] = self.filter_high(self.fake_H).float().cpu()
out_dict['gt_hf'] = self.filter_high(self.var_H).float().cpu()
out_dict['HR'] = self.var_H.detach()[0].float().cpu()
out_dict['HR_hf'] = self.filter_high(self.var_H).detach().float().cpu()
if not tsamples:
out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
else:
out_dict['SR'] = self.fake_H.detach().float().cpu()
if not tsamples and self.val_lpips:
out_dict['LPIPS'] = self.LPIPS.detach().float().cpu()
if not tsamples and self.needHR:
out_dict['HR'] = self.var_H.detach()[0].float().cpu()
return out_dict
def print_network(self):
# Generator
s, n = self.get_network_description(self.netG)
if isinstance(self.netG, nn.DataParallel):
net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,
self.netG.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netG.__class__.__name__)
logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
if self.is_train:
# Discriminator_wlt
if self.l_gan_H_target_w > 0:
s, n = self.get_network_description(self.netD_target)
if isinstance(self.netD_target, nn.DataParallel):
net_struc_str = '{} - {}'.format(self.netD_target.__class__.__name__,
self.netD_target.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netD_target.__class__.__name__)
logger.info('Network D_target structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
if self.l_gan_H_source_w > 0:
# Discriminator_pair
s, n = self.get_network_description(self.netD_source)
if isinstance(self.netD_source, nn.DataParallel):
net_struc_str = '{} - {}'.format(self.netD_source.__class__.__name__,
self.netD_source.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netD_source.__class__.__name__)
logger.info('Network D_source structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
if self.cri_fea and self.l_fea_type in ['l1', 'l2']: # F, Perceptual Network
s, n = self.get_network_description(self.netF)
if isinstance(self.netF, nn.DataParallel):
net_struc_str = '{} - {}'.format(self.netF.__class__.__name__,
self.netF.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netF.__class__.__name__)
logger.info('Network F structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
def load(self):
load_path_G = self.opt['path']['pretrain_model_G']
if load_path_G is not None:
logger.info('Loading pretrained model for G [{:s}] ...'.format(load_path_G))
self.load_network(load_path_G, self.netG)
load_path_D_target = self.opt['path']['pretrain_model_D_target']
if self.opt['is_train'] and load_path_D_target is not None:
logger.info('Loading pretrained model for D_target [{:s}] ...'.format(load_path_D_target))
self.load_network(load_path_D_target, self.netD_target)
load_path_D_source = self.opt['path']['pretrain_model_D_source']
if self.opt['is_train'] and load_path_D_source is not None:
logger.info('Loading pretrained model for D_source [{:s}] ...'.format(load_path_D_source))
self.load_network(load_path_D_source, self.netD_source)
def save(self, iter_step):
self.save_network(self.netG, 'G', iter_step)
if self.l_gan_H_target_w > 0:
self.save_network(self.netD_target, 'D_target', iter_step)
if self.l_gan_H_source_w > 0:
self.save_network(self.netD_source, 'D_source', iter_step)
def wavelet_s(self, x, norm=False):
LL, Hc = self.DWT2(x)
Hc = Hc[0]
if norm:
LL, Hc = LL * 0.5, Hc * 0.5 + 0.5 # norm [0, 1]
LH, HL, HH = Hc[:, :, 0, :, :], \
Hc[:, :, 1, :, :], \
Hc[:, :, 2, :, :]
Hc = torch.cat((LH, HL, HH), dim=1)
return LL, Hc
def filter_func(self, x, norm=False):
low_f, high_f = self.filter_low(x), self.filter_high(x)
if norm:
high_f = high_f * 0.5 + 0.5
return low_f, high_f
| 46.691304 | 132 | 0.581758 |
795810e31b7403460ba5c8bc93d542eab1135815 | 964 | py | Python | custom_components/xiaomi_miot/core/const.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | 1 | 2021-12-10T12:30:34.000Z | 2021-12-10T12:30:34.000Z | custom_components/xiaomi_miot/core/const.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | null | null | null | custom_components/xiaomi_miot/core/const.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | null | null | null | from .device_customizes import DEVICE_CUSTOMIZES
from .miot_local_devices import MIOT_LOCAL_MODELS # noqa
from .translation_languages import TRANSLATION_LANGUAGES # noqa
DOMAIN = 'xiaomi_miot'
DEFAULT_NAME = 'Xiaomi Miot'
CONF_MODEL = 'model'
CONF_XIAOMI_CLOUD = 'xiaomi_cloud'
CONF_SERVER_COUNTRY = 'server_country'
CONF_CONN_MODE = 'conn_mode'
CONF_CONFIG_VERSION = 'config_version'
DEFAULT_CONN_MODE = 'cloud'
SUPPORTED_DOMAINS = [
'sensor',
'binary_sensor',
'switch',
'light',
'fan',
'climate',
'cover',
'humidifier',
'media_player',
'camera',
'vacuum',
'water_heater',
'device_tracker',
'remote',
'number',
'alarm_control_panel',
]
try:
# hass 2021.7.0b0+
from homeassistant.components.select import DOMAIN as DOMAIN_SELECT
SUPPORTED_DOMAINS.append(DOMAIN_SELECT)
except ModuleNotFoundError:
DOMAIN_SELECT = None
GLOBAL_CUSTOMIZES = {
'models': DEVICE_CUSTOMIZES,
}
| 20.956522 | 71 | 0.71473 |
7958127fdcb5edc8709b342c9886ed526370c1c4 | 4,483 | py | Python | datasets/cocov2.py | VinhLoiIT/EfficientDet.Pytorch | a5a753c6566c21f8c3fad12798efc48295d11a00 | [
"MIT"
] | null | null | null | datasets/cocov2.py | VinhLoiIT/EfficientDet.Pytorch | a5a753c6566c21f8c3fad12798efc48295d11a00 | [
"MIT"
] | null | null | null | datasets/cocov2.py | VinhLoiIT/EfficientDet.Pytorch | a5a753c6566c21f8c3fad12798efc48295d11a00 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
import cv2
from PIL import Image
class CocoDataset(Dataset):
"""Coco dataset."""
def __init__(self, root_dir, set_name='train2017', transform=None):
"""
Args:
root_dir (string): COCO directory.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.root_dir = root_dir
self.set_name = set_name
self.transform = transform
self.coco = COCO(os.path.join(self.root_dir, 'train_traffic_sign_dataset.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
def load_classes(self):
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
target = np.array(annot)
bbox = target[:, :4]
labels = target[:, 4]
labels = np.array(labels, dtype=np.int)
if self.transform is not None:
annotation = {'image': img, 'bboxes': bbox, 'category_id': labels}
augmentation = self.transform(**annotation)
img = augmentation['image']
bbox = augmentation['bboxes']
labels = augmentation['category_id']
return {'image': img, 'bboxes': bbox, 'category_id': labels}
def load_image(self, image_index):
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.root_dir, 'images', image_info['file_name'])
img = cv2.imread(path)
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
return img
def load_annotations(self, image_index):
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, :4] = a['bbox']
annotation[0, 4] = self.coco_label_to_label(a['category_id'])
annotations = np.append(annotations, annotation, axis=0)
# transform from [x, y, w, h] to [x1, y1, x2, y2]
annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
return annotations
def coco_label_to_label(self, coco_label):
return self.coco_labels_inverse[coco_label]
def label_to_coco_label(self, label):
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def num_classes(self):
return 80
if __name__=='__main__':
from augmentation import get_augumentation
dataset = CocoDataset(root_dir = '/root/data/coco', set_name='trainval35k', transform=get_augumentation(phase='train'))
sample = dataset[0]
print('sample: ', sample)
| 33.207407 | 123 | 0.618782 |
7958138ddadd28c8c8f545147d8779f195419afd | 7,741 | py | Python | Assets/menus/end_level.py | ChristianD37/Natural-Selection | c3cc240838226124b07c26c4e64b9facc56870d1 | [
"MIT"
] | 11 | 2020-06-28T05:19:34.000Z | 2021-11-19T17:52:30.000Z | Assets/menus/end_level.py | ChristianD37/Natural-Selection | c3cc240838226124b07c26c4e64b9facc56870d1 | [
"MIT"
] | null | null | null | Assets/menus/end_level.py | ChristianD37/Natural-Selection | c3cc240838226124b07c26c4e64b9facc56870d1 | [
"MIT"
] | 1 | 2021-01-08T23:58:43.000Z | 2021-01-08T23:58:43.000Z | import pygame, os,json
from Assets.menus.menu import Menu
from Assets.util.file_handler import write_save
class LevelComplete(Menu):
def __init__(self, game):
Menu.__init__(self, game)
self.credx, self.credy = self.mid_w, self.mid_h / 4
self.text_display = ""
self.text = list("You got the bread!")
self.last_update = pygame.time.get_ticks()
self.text_displayed, self.done_counting, self.display_collect = False, False, False
self.count = 0
self.count_color = (255, 255, 255)
self.background_update = 0
self.R, self.G, self.B = 0,0,0
self.continue_timer = 0
self.draw_continue = True
self.sleep_frames = []
self.sleep_index, self.sleep_timer = 0,0
for i in range(1,7):
self.sleep_frames.append(pygame.transform.scale(
self.game.duck_sheet.get_sprite('duck_sleep' + str(i) + '.png'), (152,152)))
def display_menu(self):
pygame.mixer.music.load(os.path.join(self.game.menu_music, "Safe Place.ogg"))
pygame.mixer.music.play(loops=-1)
self.run_display = True
while self.run_display:
self.game.get_delta()
self.handle_background()
self.game.display.fill((self.R,self.G,self.B))
self.game.display.blit(self.sleep_frames[self.sleep_index], (self.credx - 78, self.game.DISPLAY_H * .5))
if not self.text_displayed: self.update_text()
self.draw_text(self.text_display, 20, pygame.Color((255, 255, 255)), self.credx, self.credy)
if self.text_displayed: self.show_collectibles()
if self.done_counting: self.draw_fruit()
if self.display_collect:
self.continue_text()
if self.draw_continue:
self.draw_text('Press Start to Continue', 20, (255, 255, 255), self.credx, self.game.DISPLAY_H * .90)
self.blit_screen()
self.check_events()
if (self.game.START_KEY or self.game.BACK_KEY or self.game.JUMP_KEY or self.game.RUN_KEY) and self.display_collect:
self.run_display = False
self.game.playing = True
self.game.reset_keys()
pygame.mixer.music.stop()
self.reset_counts()
self.write_data()
def update_text(self):
now = pygame.time.get_ticks()
if now - self.last_update > 125:
self.last_update = now
if self.text:
self.text_display += self.text.pop(0)
else: self.text_displayed = True
def continue_text(self):
now = pygame.time.get_ticks()
if now - self.continue_timer > 700:
self.continue_timer = now
self.draw_continue = not self.draw_continue
def show_collectibles(self):
if not self.done_counting:
now = pygame.time.get_ticks()
if now - self.last_update > 100:
self.last_update = now
if self.count < self.game.player.berry_count:
self.count += 1
self.game.sound_effects['berry_collect'].play()
else: self.done_counting = True;
if self.count == 30: self.count_color = (255, 223, 0)
self.game.display.blit(self.game.berries_hud.image, (self.credx - 100 , self.credy + 85))
self.draw_text(' x ' + str(self.count), 20, pygame.Color(self.count_color), self.credx , self.credy + 100)
def draw_fruit(self):
i = 0
now = pygame.time.get_ticks()
if now - self.last_update > 1000:
for sprite in self.game.hud.sprites[2:]:
self.game.display.blit(sprite.image, (self.credx - 100 + i , self.credy + 165))
i += 50
self.display_collect = True
def reset_counts(self):
self.text_displayed, self.done_counting, self.display_collect = False, False, False
self.count = 0
self.count_color = (255, 255, 255)
self.text_display = ""
self.text = list("You got the bread!")
def handle_sleep(self):
now = pygame.time.get_ticks()
if now - self.sleep_timer > 230:
self.sleep_timer = now
self.sleep_index = (self.sleep_index + 1) % len(self.sleep_frames)
def handle_background(self):
now = pygame.time.get_ticks()
if now - self.background_update > 20:
self.background_update = now
self.R = min(self.R+1, 25)
self.G = min(self.G + 1, 25)
self.B = min(self.B + 1, 65)
self.handle_sleep()
def write_data(self):
# Check how many berries were collected
berry_max = False
if self.game.player.berry_count > self.game.save_data["level"][str(self.game.levelnum - 1)]["berries"]:
self.game.save_data["level"][str(self.game.levelnum - 1)]["berries"] = self.game.player.berry_count
berry_max = True
# Check for fruit collected
for fruit in self.game.player.fruits:
if not self.game.save_data["level"][str(self.game.levelnum - 1)]["items"][fruit]:
self.game.save_data["level"][str(self.game.levelnum - 1)]["items"][fruit] = True
# Check if level is complete
if berry_max and len(self.game.player.fruits) > 2:
self.game.save_data["level"][str(self.game.levelnum)]["complete"] = True
# Unlock next level
self.game.save_data["level"][str(self.game.levelnum)]["unlocked"] = True
write_save(self.game.options_dir, self.game.save_data)
with open(os.path.join(self.game.options_dir, "save.json"), 'r+') as file:
self.save_data = json.load(file)
class GameOver(Menu):
def __init__(self, game):
Menu.__init__(self, game)
self.game_over_text = self.font.render('Game Over', True, (255,255,255))
self.gox, self.goy = self.mid_w -100, -40
self.last_update, self.current_frame = 0,0
self.duck_frames = []
for i in range(1,9):
self.duck_frames.append(self.game.duck_sheet.get_sprite('ghost_duck' + str(i) + '.png'))
self.ghost_image = self.duck_frames[0]
def display_menu(self):
self.gox, self.goy = self.mid_w - 100, -40
pygame.mixer.music.load(os.path.join(self.game.menu_music, "Game Over.ogg"))
pygame.mixer.music.play(loops=1)
self.run_display = True
now, marker = pygame.time.get_ticks(), pygame.time.get_ticks()
while self.run_display:
now = pygame.time.get_ticks()
self.game.get_delta()
self.game.display.fill((0, 0, 0))
self.handle_text()
self.animate_ghost()
self.blit_screen()
self.check_events()
if now - marker > 7000:
self.run_display = False
self.game.playing = False
self.game.reset_keys()
self.game.menu = self.game.Main
def handle_text(self):
if self.goy < self.mid_h:
self.goy += 1 * self.game.dt
self.game.display.blit(self.game_over_text, (self.gox,self.goy))
def animate_ghost(self):
now = pygame.time.get_ticks()
if now - self.last_update > 175:
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(self.duck_frames)
self.ghost_image = self.duck_frames[self.current_frame]
self.game.display.blit(self.ghost_image, (self.game.DISPLAY_W * .65, self.game.DISPLAY_H * .60))
| 43.734463 | 128 | 0.584679 |
79581492e0d9b950d401c4f0db5063bc3036a3ba | 2,425 | py | Python | CIM15/IEC61970/OperationalLimits/ActivePowerLimitSet.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 58 | 2015-04-22T10:41:03.000Z | 2022-03-29T16:04:34.000Z | CIM15/IEC61970/OperationalLimits/ActivePowerLimitSet.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 12 | 2015-08-26T03:57:23.000Z | 2020-12-11T20:14:42.000Z | CIM15/IEC61970/OperationalLimits/ActivePowerLimitSet.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 35 | 2015-01-10T12:21:03.000Z | 2020-09-09T08:18:16.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.OperationalLimits.OperationalLimitSet import OperationalLimitSet
class ActivePowerLimitSet(OperationalLimitSet):
def __init__(self, ActivePowerLimits=None, *args, **kw_args):
"""Initialises a new 'ActivePowerLimitSet' instance.
@param ActivePowerLimits:
"""
self._ActivePowerLimits = []
self.ActivePowerLimits = [] if ActivePowerLimits is None else ActivePowerLimits
super(ActivePowerLimitSet, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ActivePowerLimits"]
_many_refs = ["ActivePowerLimits"]
def getActivePowerLimits(self):
return self._ActivePowerLimits
def setActivePowerLimits(self, value):
for x in self._ActivePowerLimits:
x.ActivePowerLimitSet = None
for y in value:
y._ActivePowerLimitSet = self
self._ActivePowerLimits = value
ActivePowerLimits = property(getActivePowerLimits, setActivePowerLimits)
def addActivePowerLimits(self, *ActivePowerLimits):
for obj in ActivePowerLimits:
obj.ActivePowerLimitSet = self
def removeActivePowerLimits(self, *ActivePowerLimits):
for obj in ActivePowerLimits:
obj.ActivePowerLimitSet = None
| 38.492063 | 87 | 0.72866 |
795814f3d7b1fdfbdcfc41eee23ec66d0a14001c | 48 | py | Python | skompiler/fromskast/__init__.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 112 | 2018-12-12T03:54:28.000Z | 2022-01-14T14:18:42.000Z | skompiler/fromskast/__init__.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 10 | 2018-12-20T17:21:09.000Z | 2022-03-24T19:31:55.000Z | skompiler/fromskast/__init__.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 7 | 2019-02-05T05:20:05.000Z | 2021-03-21T16:31:38.000Z | """
SKompiler: Code generation from SK-AST.
"""
| 12 | 39 | 0.666667 |
79581525b50bb8c614d4a944276df132b30d1d5e | 39,742 | py | Python | sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 50,961 | 2015-01-01T06:06:31.000Z | 2022-03-31T23:40:12.000Z | sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 17,065 | 2015-01-01T02:01:58.000Z | 2022-03-31T23:48:34.000Z | sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | MaiRajborirug/scikit-learn | c18d015372f7041099d19c215cd4c36ffd6fe5c5 | [
"BSD-3-Clause"
] | 26,886 | 2015-01-01T00:59:27.000Z | 2022-03-31T18:03:23.000Z | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.datasets import make_classification, make_regression
from sklearn.datasets import make_low_rank_matrix
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.base import clone, BaseEstimator, TransformerMixin
from sklearn.base import is_regressor
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_poisson_deviance
from sklearn.dummy import DummyRegressor
from sklearn.exceptions import NotFittedError
from sklearn.compose import make_column_transformer
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES
from sklearn.ensemble._hist_gradient_boosting.loss import LeastSquares
from sklearn.ensemble._hist_gradient_boosting.loss import BinaryCrossEntropy
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.utils import shuffle
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads()
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
X_multi_classification, y_multi_classification = make_classification(
n_classes=3, n_informative=3, random_state=0
)
def _make_dumb_dataset(n_samples):
"""Make a dumb dataset to test early stopping."""
rng = np.random.RandomState(42)
X_dumb = rng.randn(n_samples, 1)
y_dumb = (X_dumb[:, 0] > 0).astype("int64")
return X_dumb, y_dumb
@pytest.mark.parametrize(
"GradientBoosting, X, y",
[
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression),
],
)
@pytest.mark.parametrize(
"params, err_msg",
[
({"loss": "blah"}, "Loss blah is not supported for"),
({"learning_rate": 0}, "learning_rate=0 must be strictly positive"),
({"learning_rate": -1}, "learning_rate=-1 must be strictly positive"),
({"max_iter": 0}, "max_iter=0 must not be smaller than 1"),
({"max_leaf_nodes": 0}, "max_leaf_nodes=0 should not be smaller than 2"),
({"max_leaf_nodes": 1}, "max_leaf_nodes=1 should not be smaller than 2"),
({"max_depth": 0}, "max_depth=0 should not be smaller than 1"),
({"min_samples_leaf": 0}, "min_samples_leaf=0 should not be smaller"),
({"l2_regularization": -1}, "l2_regularization=-1 must be positive"),
({"max_bins": 1}, "max_bins=1 should be no smaller than 2 and no larger"),
({"max_bins": 256}, "max_bins=256 should be no smaller than 2 and no"),
({"n_iter_no_change": -1}, "n_iter_no_change=-1 must be positive"),
({"validation_fraction": -1}, "validation_fraction=-1 must be strictly"),
({"validation_fraction": 0}, "validation_fraction=0 must be strictly"),
({"tol": -1}, "tol=-1 must not be smaller than 0"),
],
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
with pytest.raises(ValueError, match=err_msg):
GradientBoosting(**params).fit(X, y)
def test_invalid_classification_loss():
binary_clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
err_msg = (
"loss='binary_crossentropy' is not defined for multiclass "
"classification with n_classes=3, use "
"loss='categorical_crossentropy' instead"
)
with pytest.raises(ValueError, match=err_msg):
binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))
@pytest.mark.parametrize(
"scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
[
("neg_mean_squared_error", 0.1, True, 5, 1e-7), # use scorer
("neg_mean_squared_error", None, True, 5, 1e-1), # use scorer on train
(None, 0.1, True, 5, 1e-7), # same with default scorer
(None, None, True, 5, 1e-1),
("loss", 0.1, True, 5, 1e-7), # use loss
("loss", None, True, 5, 1e-1), # use loss on training data
(None, None, False, 5, 0.0), # no early stopping
],
)
def test_early_stopping_regression(
scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
max_iter = 200
X, y = make_regression(n_samples=50, random_state=0)
gb = HistGradientBoostingRegressor(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0,
)
gb.fit(X, y)
if early_stopping:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
"data",
(
make_classification(n_samples=30, random_state=0),
make_classification(
n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
),
),
)
@pytest.mark.parametrize(
"scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
[
("accuracy", 0.1, True, 5, 1e-7), # use scorer
("accuracy", None, True, 5, 1e-1), # use scorer on training data
(None, 0.1, True, 5, 1e-7), # same with default scorer
(None, None, True, 5, 1e-1),
("loss", 0.1, True, 5, 1e-7), # use loss
("loss", None, True, 5, 1e-1), # use loss on training data
(None, None, False, 5, 0.0), # no early stopping
],
)
def test_early_stopping_classification(
data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
max_iter = 50
X, y = data
gb = HistGradientBoostingClassifier(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0,
)
gb.fit(X, y)
if early_stopping is True:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
"GradientBoosting, X, y",
[
(HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
(HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
(HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
(HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
],
)
def test_early_stopping_default(GradientBoosting, X, y):
# Test that early stopping is enabled by default if and only if there
# are more than 10000 samples
gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
gb.fit(X, y)
if X.shape[0] > 10000:
assert gb.n_iter_ < gb.max_iter
else:
assert gb.n_iter_ == gb.max_iter
@pytest.mark.parametrize(
"scores, n_iter_no_change, tol, stopping",
[
([], 1, 0.001, False), # not enough iterations
([1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement
([1] * 6, 5, 0.0, True), # no significant improvement
([1] * 6, 5, 0.001, True), # no significant improvement
([1] * 6, 5, 5, True), # no significant improvement
],
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)
assert gbdt._should_stop(scores) == stopping
def test_absolute_error():
# For coverage only.
X, y = make_regression(n_samples=500, random_state=0)
gbdt = HistGradientBoostingRegressor(loss="absolute_error", random_state=0)
gbdt.fit(X, y)
assert gbdt.score(X, y) > 0.9
def test_absolute_error_sample_weight():
# non regression test for issue #19400
# make sure no error is thrown during fit of
# HistGradientBoostingRegressor with absolute_error loss function
# and passing sample_weight
rng = np.random.RandomState(0)
n_samples = 100
X = rng.uniform(-1, 1, size=(n_samples, 2))
y = rng.uniform(-1, 1, size=n_samples)
sample_weight = rng.uniform(0, 1, size=n_samples)
gbdt = HistGradientBoostingRegressor(loss="absolute_error")
gbdt.fit(X, y, sample_weight=sample_weight)
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
def test_poisson_y_positive(y):
# Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0)
with pytest.raises(ValueError, match=err_msg):
gbdt.fit(np.zeros(shape=(len(y), 1)), y)
def test_poisson():
# For Poisson distributed target, Poisson loss should give better results
# than least squares measured in Poisson deviance as metric.
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 100, 100
X = make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# exponentiated.
coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
y = rng.poisson(lam=np.exp(X @ coef))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=rng
)
gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
gbdt_pois.fit(X_train, y_train)
gbdt_ls.fit(X_train, y_train)
dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
for X, y in [(X_train, y_train), (X_test, y_test)]:
metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
# squared_error might produce non-positive predictions => clip
metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
assert metric_pois < metric_ls
assert metric_pois < metric_dummy
def test_binning_train_validation_are_separated():
# Make sure training and validation data are binned separately.
# See issue 13926
rng = np.random.RandomState(0)
validation_fraction = 0.2
gb = HistGradientBoostingClassifier(
early_stopping=True, validation_fraction=validation_fraction, random_state=rng
)
gb.fit(X_classification, y_classification)
mapper_training_data = gb._bin_mapper
# Note that since the data is small there is no subsampling and the
# random_state doesn't matter
mapper_whole_data = _BinMapper(random_state=0)
mapper_whole_data.fit(X_classification)
n_samples = X_classification.shape[0]
assert np.all(
mapper_training_data.n_bins_non_missing_
== int((1 - validation_fraction) * n_samples)
)
assert np.all(
mapper_training_data.n_bins_non_missing_
!= mapper_whole_data.n_bins_non_missing_
)
def test_missing_values_trivial():
# sanity check for missing values support. With only one feature and
# y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
# training set.
n_samples = 100
n_features = 1
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
X[mask] = np.nan
y = mask.ravel()
gb = HistGradientBoostingClassifier()
gb.fit(X, y)
assert gb.score(X, y) == pytest.approx(1)
@pytest.mark.parametrize("problem", ("classification", "regression"))
@pytest.mark.parametrize(
"missing_proportion, expected_min_score_classification, "
"expected_min_score_regression",
[(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
)
def test_missing_values_resilience(
problem,
missing_proportion,
expected_min_score_classification,
expected_min_score_regression,
):
# Make sure the estimators can deal with missing values and still yield
# decent predictions
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 2
if problem == "regression":
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
random_state=rng,
)
gb = HistGradientBoostingRegressor()
expected_min_score = expected_min_score_regression
else:
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
n_redundant=0,
n_repeated=0,
random_state=rng,
)
gb = HistGradientBoostingClassifier()
expected_min_score = expected_min_score_classification
mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
X[mask] = np.nan
gb.fit(X, y)
assert gb.score(X, y) > expected_min_score
@pytest.mark.parametrize(
"data",
[
make_classification(random_state=0, n_classes=2),
make_classification(random_state=0, n_classes=3, n_informative=3),
],
ids=["binary_crossentropy", "categorical_crossentropy"],
)
def test_zero_division_hessians(data):
# non regression test for issue #14018
# make sure we avoid zero division errors when computing the leaves values.
# If the learning rate is too high, the raw predictions are bad and will
# saturate the softmax (or sigmoid in binary classif). This leads to
# probabilities being exactly 0 or 1, gradients being constant, and
# hessians being zero.
X, y = data
gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
gb.fit(X, y)
def test_small_trainset():
# Make sure that the small trainset is stratified and has the expected
# length (10k samples)
n_samples = 20000
original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
rng = np.random.RandomState(42)
X = rng.randn(n_samples).reshape(n_samples, 1)
y = [
[class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
]
y = shuffle(np.concatenate(y))
gb = HistGradientBoostingClassifier()
# Compute the small training set
X_small, y_small, _ = gb._get_small_trainset(
X, y, seed=42, sample_weight_train=None
)
# Compute the class distribution in the small training set
unique, counts = np.unique(y_small, return_counts=True)
small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}
# Test that the small training set has the expected length
assert X_small.shape[0] == 10000
assert y_small.shape[0] == 10000
# Test that the class distributions in the whole dataset and in the small
# training set are identical
assert small_distrib == pytest.approx(original_distrib)
def test_missing_values_minmax_imputation():
# Compare the buit-in missing value handling of Histogram GBC with an
# a-priori missing value imputation strategy that should yield the same
# results in terms of decision function.
#
# Each feature (containing NaNs) is replaced by 2 features:
# - one where the nans are replaced by min(feature) - 1
# - one where the nans are replaced by max(feature) + 1
# A split where nans go to the left has an equivalent split in the
# first (min) feature, and a split where nans go to the right has an
# equivalent split in the second (max) feature.
#
# Assuming the data is such that there is never a tie to select the best
# feature to split on during training, the learned decision trees should be
# strictly equivalent (learn a sequence of splits that encode the same
# decision function).
#
# The MinMaxImputer transformer is meant to be a toy implementation of the
# "Missing In Attributes" (MIA) missing value handling for decision trees
# https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
# The implementation of MIA as an imputation transformer was suggested by
# "Remark 3" in https://arxiv.org/abs/1902.06931
class MinMaxImputer(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
mm = MinMaxScaler().fit(X)
self.data_min_ = mm.data_min_
self.data_max_ = mm.data_max_
return self
def transform(self, X):
X_min, X_max = X.copy(), X.copy()
for feature_idx in range(X.shape[1]):
nan_mask = np.isnan(X[:, feature_idx])
X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
return np.concatenate([X_min, X_max], axis=1)
def make_missing_value_data(n_samples=int(1e4), seed=0):
rng = np.random.RandomState(seed)
X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
# Pre-bin the data to ensure a deterministic handling by the 2
# strategies and also make it easier to insert np.nan in a structured
# way:
X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X)
# First feature has missing values completely at random:
rnd_mask = rng.rand(X.shape[0]) > 0.9
X[rnd_mask, 0] = np.nan
# Second and third features have missing values for extreme values
# (censoring missingness):
low_mask = X[:, 1] == 0
X[low_mask, 1] = np.nan
high_mask = X[:, 2] == X[:, 2].max()
X[high_mask, 2] = np.nan
# Make the last feature nan pattern very informative:
y_max = np.percentile(y, 70)
y_max_mask = y >= y_max
y[y_max_mask] = y_max
X[y_max_mask, 3] = np.nan
# Check that there is at least one missing value in each feature:
for feature_idx in range(X.shape[1]):
assert any(np.isnan(X[:, feature_idx]))
# Let's use a test set to check that the learned decision function is
# the same as evaluated on unseen data. Otherwise it could just be the
# case that we find two independent ways to overfit the training set.
return train_test_split(X, y, random_state=rng)
# n_samples need to be large enough to minimize the likelihood of having
# several candidate splits with the same gain value in a given tree.
X_train, X_test, y_train, y_test = make_missing_value_data(
n_samples=int(1e4), seed=0
)
# Use a small number of leaf nodes and iterations so as to keep
# under-fitting models to minimize the likelihood of ties when training the
# model.
gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
gbm1.fit(X_train, y_train)
gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
gbm2.fit(X_train, y_train)
# Check that the model reach the same score:
assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
# Check the individual prediction match as a finer grained
# decision function check.
assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
def test_infinite_values():
# Basic test for infinite values
X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
y = np.array([0, 0, 1, 1])
gbdt = HistGradientBoostingRegressor(min_samples_leaf=1)
gbdt.fit(X, y)
np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4)
def test_consistent_lengths():
X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
y = np.array([0, 0, 1, 1])
sample_weight = np.array([0.1, 0.3, 0.1])
gbdt = HistGradientBoostingRegressor()
with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
gbdt.fit(X, y, sample_weight)
with pytest.raises(
ValueError, match="Found input variables with inconsistent number"
):
gbdt.fit(X, y[1:])
def test_infinite_values_missing_values():
# High level test making sure that inf and nan values are properly handled
# when both are present. This is similar to
# test_split_on_nan_with_infinite_values() in test_grower.py, though we
# cannot check the predictions for binned values here.
X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
y_isnan = np.isnan(X.ravel())
y_isinf = X.ravel() == np.inf
stump_clf = HistGradientBoostingClassifier(
min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
)
assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1
assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1
def test_crossentropy_binary_problem():
# categorical_crossentropy should only be used if there are more than two
# classes present. PR #14869
X = [[1], [0]]
y = [0, 1]
gbrt = HistGradientBoostingClassifier(loss="categorical_crossentropy")
with pytest.raises(
ValueError, match="'categorical_crossentropy' is not suitable for"
):
gbrt.fit(X, y)
@pytest.mark.parametrize("scoring", [None, "loss"])
def test_string_target_early_stopping(scoring):
# Regression tests for #14709 where the targets need to be encoded before
# to compute the score
rng = np.random.RandomState(42)
X = rng.randn(100, 10)
y = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
gbrt.fit(X, y)
def test_zero_sample_weights_regression():
# Make sure setting a SW to zero amounts to ignoring the corresponding
# sample
X = [[1, 0], [1, 0], [1, 0], [0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = HistGradientBoostingRegressor(min_samples_leaf=1)
gb.fit(X, y, sample_weight=sample_weight)
assert gb.predict([[1, 0]])[0] > 0.5
def test_zero_sample_weights_classification():
# Make sure setting a SW to zero amounts to ignoring the corresponding
# sample
X = [[1, 0], [1, 0], [1, 0], [0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = HistGradientBoostingClassifier(loss="binary_crossentropy", min_samples_leaf=1)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
y = [0, 0, 1, 0, 2]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1, 1]
gb = HistGradientBoostingClassifier(
loss="categorical_crossentropy", min_samples_leaf=1
)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
@pytest.mark.parametrize(
"problem", ("regression", "binary_classification", "multiclass_classification")
)
@pytest.mark.parametrize("duplication", ("half", "all"))
def test_sample_weight_effect(problem, duplication):
# High level test to make sure that duplicating a sample is equivalent to
# giving it weight of 2.
# fails for n_samples > 255 because binning does not take sample weights
# into account. Keeping n_samples <= 255 makes
# sure only unique values are used so SW have no effect on binning.
n_samples = 255
n_features = 2
if problem == "regression":
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
random_state=0,
)
Klass = HistGradientBoostingRegressor
else:
n_classes = 2 if problem == "binary_classification" else 3
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
n_redundant=0,
n_clusters_per_class=1,
n_classes=n_classes,
random_state=0,
)
Klass = HistGradientBoostingClassifier
# This test can't pass if min_samples_leaf > 1 because that would force 2
# samples to be in the same node in est_sw, while these samples would be
# free to be separate in est_dup: est_dup would just group together the
# duplicated samples.
est = Klass(min_samples_leaf=1)
# Create dataset with duplicate and corresponding sample weights
if duplication == "half":
lim = n_samples // 2
else:
lim = n_samples
X_dup = np.r_[X, X[:lim]]
y_dup = np.r_[y, y[:lim]]
sample_weight = np.ones(shape=(n_samples))
sample_weight[:lim] = 2
est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
est_dup = clone(est).fit(X_dup, y_dup)
# checking raw_predict is stricter than just predict for classification
assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
@pytest.mark.parametrize("loss_name", ("squared_error", "absolute_error"))
def test_sum_hessians_are_sample_weight(loss_name):
# For losses with constant hessians, the sum_hessians field of the
# histograms must be equal to the sum of the sample weight of samples at
# the corresponding bin.
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 2
X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
bin_mapper = _BinMapper()
X_binned = bin_mapper.fit_transform(X)
sample_weight = rng.normal(size=n_samples)
loss = _LOSSES[loss_name](sample_weight=sample_weight, n_threads=n_threads)
gradients, hessians = loss.init_gradients_and_hessians(
n_samples=n_samples, prediction_dim=1, sample_weight=sample_weight
)
raw_predictions = rng.normal(size=(1, n_samples))
loss.update_gradients_and_hessians(
gradients, hessians, y, raw_predictions, sample_weight
)
# build sum_sample_weight which contains the sum of the sample weights at
# each bin (for each feature). This must be equal to the sum_hessians
# field of the corresponding histogram
sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
for feature_idx in range(n_features):
for sample_idx in range(n_samples):
sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
sample_idx
]
# Build histogram
grower = TreeGrower(X_binned, gradients[0], hessians[0], n_bins=bin_mapper.n_bins)
histograms = grower.histogram_builder.compute_histograms_brute(
grower.root.sample_indices
)
for feature_idx in range(n_features):
for bin_idx in range(bin_mapper.n_bins):
assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
)
def test_max_depth_max_leaf_nodes():
# Non regression test for
# https://github.com/scikit-learn/scikit-learn/issues/16179
# there was a bug when the max_depth and the max_leaf_nodes criteria were
# met at the same time, which would lead to max_leaf_nodes not being
# respected.
X, y = make_classification(random_state=0)
est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(
X, y
)
tree = est._predictors[0][0]
assert tree.get_max_depth() == 2
assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix
def test_early_stopping_on_test_set_with_warm_start():
# Non regression test for #16661 where second fit fails with
# warm_start=True, early_stopping is on, and no validation set
X, y = make_classification(random_state=0)
gb = HistGradientBoostingClassifier(
max_iter=1,
scoring="loss",
warm_start=True,
early_stopping=True,
n_iter_no_change=1,
validation_fraction=None,
)
gb.fit(X, y)
# does not raise on second call
gb.set_params(max_iter=2)
gb.fit(X, y)
@pytest.mark.parametrize(
"Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_single_node_trees(Est):
# Make sure it's still possible to build single-node trees. In that case
# the value of the root is set to 0. That's a correct value: if the tree is
# single-node that's because min_gain_to_split is not respected right from
# the root, so we don't want the tree to have any impact on the
# predictions.
X, y = make_classification(random_state=0)
y[:] = 1 # constant target will lead to a single root node
est = Est(max_iter=20)
est.fit(X, y)
assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)
assert all(predictor[0].nodes[0]["value"] == 0 for predictor in est._predictors)
# Still gives correct predictions thanks to the baseline prediction
assert_allclose(est.predict(X), y)
@pytest.mark.parametrize(
"Est, loss, X, y",
[
(
HistGradientBoostingClassifier,
BinaryCrossEntropy(sample_weight=None),
X_classification,
y_classification,
),
(
HistGradientBoostingRegressor,
LeastSquares(sample_weight=None),
X_regression,
y_regression,
),
],
)
def test_custom_loss(Est, loss, X, y):
est = Est(loss=loss, max_iter=20)
est.fit(X, y)
@pytest.mark.parametrize(
"HistGradientBoosting, X, y",
[
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression),
(
HistGradientBoostingClassifier,
X_multi_classification,
y_multi_classification,
),
],
)
def test_staged_predict(HistGradientBoosting, X, y):
# Test whether staged predictor eventually gives
# the same prediction.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0
)
gb = HistGradientBoosting(max_iter=10)
# test raise NotFittedError if not fitted
with pytest.raises(NotFittedError):
next(gb.staged_predict(X_test))
gb.fit(X_train, y_train)
# test if the staged predictions of each iteration
# are equal to the corresponding predictions of the same estimator
# trained from scratch.
# this also test limit case when max_iter = 1
method_names = (
["predict"]
if is_regressor(gb)
else ["predict", "predict_proba", "decision_function"]
)
for method_name in method_names:
staged_method = getattr(gb, "staged_" + method_name)
staged_predictions = list(staged_method(X_test))
assert len(staged_predictions) == gb.n_iter_
for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):
aux = HistGradientBoosting(max_iter=n_iter)
aux.fit(X_train, y_train)
pred_aux = getattr(aux, method_name)(X_test)
assert_allclose(staged_predictions, pred_aux)
assert staged_predictions.shape == pred_aux.shape
@pytest.mark.parametrize("insert_missing", [False, True])
@pytest.mark.parametrize(
"Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)
)
@pytest.mark.parametrize("bool_categorical_parameter", [True, False])
def test_unknown_categories_nan(insert_missing, Est, bool_categorical_parameter):
# Make sure no error is raised at predict if a category wasn't seen during
# fit. We also make sure they're treated as nans.
rng = np.random.RandomState(0)
n_samples = 1000
f1 = rng.rand(n_samples)
f2 = rng.randint(4, size=n_samples)
X = np.c_[f1, f2]
y = np.zeros(shape=n_samples)
y[X[:, 1] % 2 == 0] = 1
if bool_categorical_parameter:
categorical_features = [False, True]
else:
categorical_features = [1]
if insert_missing:
mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)
assert mask.sum() > 0
X[mask] = np.nan
est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)
assert_array_equal(est.is_categorical_, [False, True])
# Make sure no error is raised on unknown categories and nans
# unknown categories will be treated as nans
X_test = np.zeros((10, X.shape[1]), dtype=float)
X_test[:5, 1] = 30
X_test[5:, 1] = np.nan
assert len(np.unique(est.predict(X_test))) == 1
def test_categorical_encoding_strategies():
# Check native categorical handling vs different encoding strategies. We
# make sure that native encoding needs only 1 split to achieve a perfect
# prediction on a simple dataset. In contrast, OneHotEncoded data needs
# more depth / splits, and treating categories as ordered (just using
# OrdinalEncoder) requires even more depth.
# dataset with one random continuous feature, and one categorical feature
# with values in [0, 5], e.g. from an OrdinalEncoder.
# class == 1 iff categorical value in {0, 2, 4}
rng = np.random.RandomState(0)
n_samples = 10_000
f1 = rng.rand(n_samples)
f2 = rng.randint(6, size=n_samples)
X = np.c_[f1, f2]
y = np.zeros(shape=n_samples)
y[X[:, 1] % 2 == 0] = 1
# make sure dataset is balanced so that the baseline_prediction doesn't
# influence predictions too much with max_iter = 1
assert 0.49 < y.mean() < 0.51
clf_cat = HistGradientBoostingClassifier(
max_iter=1, max_depth=1, categorical_features=[False, True]
)
# Using native categorical encoding, we get perfect predictions with just
# one split
assert cross_val_score(clf_cat, X, y).mean() == 1
# quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21
expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]
left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]
assert_array_equal(left_bitset, expected_left_bitset)
# Treating categories as ordered, we need more depth / more splits to get
# the same predictions
clf_no_cat = HistGradientBoostingClassifier(
max_iter=1, max_depth=4, categorical_features=None
)
assert cross_val_score(clf_no_cat, X, y).mean() < 0.9
clf_no_cat.set_params(max_depth=5)
assert cross_val_score(clf_no_cat, X, y).mean() == 1
# Using OHEd data, we need less splits than with pure OEd data, but we
# still need more splits than with the native categorical splits
ct = make_column_transformer(
(OneHotEncoder(sparse=False), [1]), remainder="passthrough"
)
X_ohe = ct.fit_transform(X)
clf_no_cat.set_params(max_depth=2)
assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9
clf_no_cat.set_params(max_depth=3)
assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1
@pytest.mark.parametrize(
"Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize(
"categorical_features, monotonic_cst, expected_msg",
[
(
["hello", "world"],
None,
"categorical_features must be an array-like of bools or array-like of "
"ints.",
),
(
[0, -1],
None,
(
r"categorical_features set as integer indices must be in "
r"\[0, n_features - 1\]"
),
),
(
[True, True, False, False, True],
None,
r"categorical_features set as a boolean mask must have shape "
r"\(n_features,\)",
),
(
[True, True, False, False],
[0, -1, 0, 1],
"Categorical features cannot have monotonic constraints",
),
],
)
def test_categorical_spec_errors(
Est, categorical_features, monotonic_cst, expected_msg
):
# Test errors when categories are specified incorrectly
n_samples = 100
X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)
rng = np.random.RandomState(0)
X[:, 0] = rng.randint(0, 10, size=n_samples)
X[:, 1] = rng.randint(0, 10, size=n_samples)
est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)
with pytest.raises(ValueError, match=expected_msg):
est.fit(X, y)
@pytest.mark.parametrize(
"Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
@pytest.mark.parametrize("categorical_features", ([False, False], []))
@pytest.mark.parametrize("as_array", (True, False))
def test_categorical_spec_no_categories(Est, categorical_features, as_array):
# Make sure we can properly detect that no categorical features are present
# even if the categorical_features parameter is not None
X = np.arange(10).reshape(5, 2)
y = np.arange(5)
if as_array:
categorical_features = np.asarray(categorical_features)
est = Est(categorical_features=categorical_features).fit(X, y)
assert est.is_categorical_ is None
@pytest.mark.parametrize(
"Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_categorical_bad_encoding_errors(Est):
# Test errors when categories are encoded incorrectly
gb = Est(categorical_features=[True], max_bins=2)
X = np.array([[0, 1, 2]]).T
y = np.arange(3)
msg = "Categorical feature at index 0 is expected to have a cardinality <= 2"
with pytest.raises(ValueError, match=msg):
gb.fit(X, y)
X = np.array([[0, 2]]).T
y = np.arange(2)
msg = "Categorical feature at index 0 is expected to be encoded with values < 2"
with pytest.raises(ValueError, match=msg):
gb.fit(X, y)
# nans are ignored in the counts
X = np.array([[0, 1, np.nan]]).T
y = np.arange(3)
gb.fit(X, y)
@pytest.mark.parametrize(
"Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
)
def test_uint8_predict(Est):
# Non regression test for
# https://github.com/scikit-learn/scikit-learn/issues/18408
# Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. It
# will be converted to X_DTYPE.
rng = np.random.RandomState(0)
X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)
y = rng.randint(0, 2, size=10).astype(np.uint8)
est = Est()
est.fit(X, y)
est.predict(X)
# TODO: Remove in v1.2
@pytest.mark.parametrize(
"old_loss, new_loss",
[
("least_squares", "squared_error"),
("least_absolute_deviation", "absolute_error"),
],
)
def test_loss_deprecated(old_loss, new_loss):
X, y = make_regression(n_samples=50, random_state=0)
est1 = HistGradientBoostingRegressor(loss=old_loss, random_state=0)
with pytest.warns(FutureWarning, match=f"The loss '{old_loss}' was deprecated"):
est1.fit(X, y)
est2 = HistGradientBoostingRegressor(loss=new_loss, random_state=0)
est2.fit(X, y)
assert_allclose(est1.predict(X), est2.predict(X))
| 36.764107 | 88 | 0.675432 |
795816ce77c9f6ca8ab45275f63f59ed0679056e | 1,566 | py | Python | leaqi/envs/gym_structured_prediction/__init__.py | xkianteb/leaqi | 924435590e74421ed16488429056f26747c99421 | [
"MIT"
] | 12 | 2020-05-25T16:50:05.000Z | 2022-02-20T08:00:04.000Z | leaqi/envs/gym_structured_prediction/__init__.py | chenyangh/leaqi | 924435590e74421ed16488429056f26747c99421 | [
"MIT"
] | null | null | null | leaqi/envs/gym_structured_prediction/__init__.py | chenyangh/leaqi | 924435590e74421ed16488429056f26747c99421 | [
"MIT"
] | 4 | 2020-05-28T18:25:01.000Z | 2021-02-25T10:29:41.000Z | import logging
from gym.envs.registration import register
logger = logging.getLogger(__name__)
register(
id='Ner-v0',
entry_point='leaqi.envs.gym_structured_prediction.envs:StructuredPredictionEnv',
kwargs={'bert_model' : 'bert-base-cased',
'VOCAB': ('<PAD>', 'O', 'I-LOC', 'I-PER', 'I-ORG', 'I-MISC'),
'num_prev_actions': 1,
'update_interval': 1,
'ID': 'Ner'},
)
register(
id='Keyphrase-v0',
entry_point='leaqi.envs.gym_structured_prediction.envs:StructuredPredictionEnv',
kwargs={'bert_model' : f'scibert_scivocab_uncased',
'VOCAB': ('<PAD>', 'O', 'I-LOC', 'I-PER', 'I-ORG', 'I-MISC'),
'num_prev_actions': 1,
'update_interval': 1,
'ID': 'Keyphrase'},
)
register(
id='Pos-v0',
entry_point='leaqi.envs.gym_structured_prediction.envs:StructuredPredictionEnv',
kwargs={'bert_model': 'bert-base-multilingual-cased',
'VOCAB': ('<PAD>','ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X'),
'num_prev_actions': 1,
'update_interval': 1,
'ID': 'Pos'},
# ADJ: adjective
# ADP: adposition
# ADV: adverb
# AUX: auxiliary
# CCONJ: coordinating conjunction
# DET: determiner
# INTJ: interjection
# NOUN: noun
# NUM: numeral
# PART: particle
# PRON: pronoun
# PROPN: proper noun
# PUNCT: punctuation
# SCONJ: subordinating conjunction
# SYM: symbol
# VERB: verb
# X: other
)
| 30.115385 | 160 | 0.591954 |
7958172c160a6cdf296ac6acb94550d7f4ab23f7 | 550 | py | Python | codewof/config/context_processors/deployed.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | null | null | null | codewof/config/context_processors/deployed.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | null | null | null | codewof/config/context_processors/deployed.py | uccser-admin/programming-practice-prototype | 3af4c7d85308ac5bb35bb13be3ec18cac4eb8308 | [
"MIT"
] | 1 | 2018-04-12T23:58:35.000Z | 2018-04-12T23:58:35.000Z | """Context processor for checking if in deployed environment."""
from django.conf import settings
def deployed(request):
"""Return a dictionary containing booleans and other info regarding deployed environment.
Returns:
Dictionary containing deployed booleans and other info to add to context.
"""
return {
"DEPLOYED": settings.DEPLOYED,
"PRODUCTION_ENVIRONMENT": settings.PRODUCTION_ENVIRONMENT,
"STAGING_ENVIRONMENT": settings.STAGING_ENVIRONMENT,
"DOMAIN": settings.CODEWOF_DOMAIN
}
| 30.555556 | 93 | 0.72 |
79581758902061c1ddacc509344a6684dd0b338f | 2,831 | py | Python | src/python/pants/backend/project_info/tasks/filedeps.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/tasks/filedeps.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/tasks/filedeps.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.task.console_task import ConsoleTask
class FileDeps(ConsoleTask):
"""List all source and BUILD files a target transitively depends on.
Files are listed with absolute paths and any BUILD files implied in the transitive closure of
targets are also included.
"""
@classmethod
def register_options(cls, register):
super(FileDeps, cls).register_options(register)
register('--globs', default=False, action='store_true',
help='Instead of outputting filenames, output globs (ignoring excludes)')
def console_output(self, targets):
concrete_targets = set()
for target in targets:
concrete_target = target.concrete_derived_from
concrete_targets.add(concrete_target)
# TODO(John Sirois): This hacks around ScalaLibraries' psuedo-deps on JavaLibraries. We've
# already tried to tuck away this hack by subclassing closure() in ScalaLibrary - but in this
# case that's not enough when a ScalaLibrary with java_sources is an interior node of the
# active context graph. This awkwardness should be eliminated when ScalaLibrary can point
# to a java source set as part of its 1st class sources.
if isinstance(concrete_target, ScalaLibrary):
concrete_targets.update(concrete_target.java_sources)
buildroot = get_buildroot()
files = set()
output_globs = self.get_options().globs
# Filter out any synthetic targets, which will not have a build_file attr.
concrete_targets = set([target for target in concrete_targets if not target.is_synthetic])
for target in concrete_targets:
files.add(target.address.build_file.full_path)
if output_globs or target.has_sources():
if output_globs:
globs_obj = target.globs_relative_to_buildroot()
if globs_obj:
files.update(os.path.join(buildroot, src) for src in globs_obj['globs'])
else:
files.update(os.path.join(buildroot, src) for src in target.sources_relative_to_buildroot())
# TODO(John Sirois): BundlePayload should expose its sources in a way uniform to
# SourcesPayload to allow this special-casing to go away.
if isinstance(target, JvmApp) and not output_globs:
files.update(itertools.chain(*[bundle.filemap.keys() for bundle in target.bundles]))
return files
| 44.936508 | 102 | 0.737195 |
795817f5474788a77ea94dfa04004d9ed1f20c98 | 5,952 | py | Python | jina/enums.py | alfred297/Pooja-AI | 0cfbd9d9eba52579fd73fd77d0d5e2c0f34d7653 | [
"Apache-2.0"
] | null | null | null | jina/enums.py | alfred297/Pooja-AI | 0cfbd9d9eba52579fd73fd77d0d5e2c0f34d7653 | [
"Apache-2.0"
] | null | null | null | jina/enums.py | alfred297/Pooja-AI | 0cfbd9d9eba52579fd73fd77d0d5e2c0f34d7653 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
"""
Miscellaneous enums used in jina
To use these enums in YAML config, following the example below:
.. highlight:: yaml
.. code-block:: yaml
!Flow
with:
logserver_config: yaml/test-server-config.yml
optimize_level: !FlowOptimizeLevel IGNORE_GATEWAY
# or
optimize_level: IGNORE_GATEWAY
#or
optimize_level: ignore_gateway
no_gateway: true
.. highlight:: yaml
.. code-block:: yaml
chunk_idx:
yaml_path: index/chunk.yml
replicas: $REPLICAS
separated_workspace: true
replicas_type: !PollingType ANY
# or
replicas_type: ANY
# or
replicas_type: any
"""
from enum import IntEnum, EnumMeta
class EnumType(EnumMeta):
def __new__(cls, *args, **kwargs):
_cls = super().__new__(cls, *args, **kwargs)
return cls.register_class(_cls)
@staticmethod
def register_class(cls):
reg_cls_set = getattr(cls, '_registered_class', set())
if cls.__name__ not in reg_cls_set:
# print('reg class: %s' % cls.__name__)
reg_cls_set.add(cls.__name__)
setattr(cls, '_registered_class', reg_cls_set)
from .helper import yaml
yaml.register_class(cls)
return cls
class BetterEnum(IntEnum, metaclass=EnumType):
    """An :class:`IntEnum` that prints as its member name and (de)serializes via YAML."""
    def __str__(self):
        # str(member) gives 'NAME' instead of 'ClassName.NAME'
        return self.name
    @classmethod
    def from_string(cls, s: str):
        """Parse the enum from a string"""
        # lookup is case-insensitive: the input is upper-cased first
        try:
            return cls[s.upper()]
        except KeyError:
            raise ValueError(f'{s.upper()} is not a valid enum for {cls}')
    @classmethod
    def to_yaml(cls, representer, data):
        """Required by :mod:`ruamel.yaml.constructor` """
        # serialized as a tagged scalar, e.g. ``!PollingType ANY``
        return representer.represent_scalar('!' + cls.__name__, str(data))
    @classmethod
    def from_yaml(cls, constructor, node):
        """Required by :mod:`ruamel.yaml.constructor` """
        return cls.from_string(node.value)
class SchedulerType(BetterEnum):
    """The enum for representing how workload is scheduled among peas."""
    LOAD_BALANCE = 0 #: balance the workload between Peas, faster peas get more work
    ROUND_ROBIN = 1 #: workload are scheduled round-robin manner to the peas, assuming all peas have uniform processing speed.
class PollingType(BetterEnum):
    """The enum for representing the parallel type of peas in a pod
    """
    ANY = 1 #: one of the replica will receive the message
    ALL = 2 #: all replica will receive the message, blocked until all done with the message
    ALL_ASYNC = 3 #: (reserved) all replica will receive the message, but any one of them can return, useful in backup
    @property
    def is_push(self) -> bool:
        """
        :return: if this :class:`PollingType` is using `push` protocol
        """
        # only ANY (value 1) uses push
        return self.value == 1
    @property
    def is_block(self) -> bool:
        """
        :return: if this :class:`PollingType` is requiring `block` protocol
        """
        # only ALL (value 2) blocks
        return self.value == 2
class FlowOptimizeLevel(BetterEnum):
    """The level of flow optimization """
    NONE = 0
    IGNORE_GATEWAY = 1
    FULL = 2
class LogVerbosity(BetterEnum):
    """Verbosity level of the logger """
    # values mirror the stdlib ``logging`` levels (plus SUCCESS = 25)
    DEBUG = 10
    INFO = 20
    SUCCESS = 25
    WARNING = 30
    ERROR = 40
    CRITICAL = 50
class SocketType(BetterEnum):
    """Enums for representing the socket type in a pod """
    # even values are *_BIND, odd values are *_CONNECT (see :attr:`is_bind`)
    PULL_BIND = 0
    PULL_CONNECT = 1
    PUSH_BIND = 2
    PUSH_CONNECT = 3
    SUB_BIND = 4
    SUB_CONNECT = 5
    PUB_BIND = 6
    PUB_CONNECT = 7
    PAIR_BIND = 8
    PAIR_CONNECT = 9
    ROUTER_BIND = 10
    DEALER_CONNECT = 11
    @property
    def is_bind(self) -> bool:
        """
        :return: if this socket is using `bind` protocol
        """
        return self.value % 2 == 0
    @property
    def is_receive(self) -> bool:
        """
        :return: if this socket is used for receiving data
        """
        # PULL_* and SUB_* sockets receive
        return self.value in {0, 1, 4, 5}
    @property
    def is_pubsub(self):
        """
        :return: if this socket is used for publish or subscribe data
        """
        # SUB_BIND(4) .. PUB_CONNECT(7)
        return 4 <= self.value <= 7
    @property
    def paired(self) -> 'SocketType':
        """
        :return: a paired
        """
        # NOTE: ROUTER_BIND and DEALER_CONNECT have no entry in this mapping,
        # so accessing ``paired`` on them raises KeyError.
        return {
            SocketType.PULL_BIND: SocketType.PUSH_CONNECT,
            SocketType.PULL_CONNECT: SocketType.PUSH_BIND,
            SocketType.SUB_BIND: SocketType.PUB_CONNECT,
            SocketType.SUB_CONNECT: SocketType.PUB_BIND,
            SocketType.PAIR_BIND: SocketType.PAIR_CONNECT,
            SocketType.PUSH_CONNECT: SocketType.PULL_BIND,
            SocketType.PUSH_BIND: SocketType.PULL_CONNECT,
            SocketType.PUB_CONNECT: SocketType.SUB_BIND,
            SocketType.PUB_BIND: SocketType.SUB_CONNECT,
            SocketType.PAIR_CONNECT: SocketType.PAIR_BIND
        }[self]
class FlowOutputType(BetterEnum):
    """The enum for representing flow output config """
    SHELL_PROC = 0 #: a shell-script, run each microservice as a process
    SHELL_DOCKER = 1 #: a shell-script, run each microservice as a container
    DOCKER_SWARM = 2 #: a docker-swarm YAML config
    K8S = 3 #: a Kubernetes YAML config
class FlowBuildLevel(BetterEnum):
    """The enum for representing a flow's build level
    Some :class:`jina.flow.Flow` class functions require certain build level to run.
    """
    EMPTY = 0 #: Nothing is built
    GRAPH = 1 #: The underlying graph is built, you may visualize the flow
class PeaRoleType(BetterEnum):
    """ The enum of a Pea role
    """
    REPLICA = 0
    HEAD = 1
    TAIL = 2
    SHARD = 3
    SINGLETON = 4
class ClientMode(BetterEnum):
    """ The enum of Client mode
    """
    INDEX = 0
    SEARCH = 1
    TRAIN = 2
class OnErrorSkip(BetterEnum):
    """ The level of error handling
    """
    NONE = 0
    EXECUTOR = 1
    DRIVER = 2
    HANDLE = 3
    CALLBACK = 4
| 25.220339 | 127 | 0.623488 |
7958184efc0dc37cacf4846b7804d193b78db708 | 65,880 | py | Python | tests/test_sqlalchemy_data_layer.py | pysalt/flask-rest-jsonapi | a4bac73c2a18a658bfd9bab4b40f9880dafa737d | [
"MIT"
] | 1 | 2020-06-22T07:28:20.000Z | 2020-06-22T07:28:20.000Z | tests/test_sqlalchemy_data_layer.py | mahenzon/flask-rest-jsonapi | 725cf11286a1ea06aa2ca62182a212d03fceb1c8 | [
"MIT"
] | null | null | null | tests/test_sqlalchemy_data_layer.py | mahenzon/flask-rest-jsonapi | 725cf11286a1ea06aa2ca62182a212d03fceb1c8 | [
"MIT"
] | null | null | null | from urllib.parse import urlencode, parse_qs
import pytest
from sqlalchemy import create_engine, Column, Integer, DateTime, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask import Blueprint, make_response, json
from marshmallow_jsonapi.flask import Schema, Relationship
from marshmallow import Schema as MarshmallowSchema
from marshmallow_jsonapi import fields
from marshmallow import ValidationError
from flask_rest_jsonapi import Api, ResourceList, ResourceDetail, ResourceRelationship, JsonApiException
from flask_rest_jsonapi.pagination import add_pagination_links
from flask_rest_jsonapi.exceptions import RelationNotFound, InvalidSort, InvalidFilters, InvalidInclude, BadRequest
from flask_rest_jsonapi.querystring import QueryStringManager as QSManager
from flask_rest_jsonapi.data_layers.alchemy import SqlalchemyDataLayer
from flask_rest_jsonapi.data_layers.base import BaseDataLayer
from flask_rest_jsonapi.data_layers.filtering.alchemy import Node
import flask_rest_jsonapi.decorators
import flask_rest_jsonapi.resource
import flask_rest_jsonapi.schema
@pytest.fixture(scope="module")
def base():
    # Shared declarative base for all test models.
    yield declarative_base()
@pytest.fixture(scope="module")
def person_tag_model(base):
    # Multi-tag model: several key/value tags per person (composite PK).
    class Person_Tag(base):
        __tablename__ = 'person_tag'
        id = Column(Integer, ForeignKey('person.person_id'), primary_key=True, index=True)
        key = Column(String, primary_key=True)
        value = Column(String, primary_key=True)
    yield Person_Tag
@pytest.fixture(scope="module")
def person_single_tag_model(base):
    # One-to-one tag model: a single key/value tag per person.
    class Person_Single_Tag(base):
        __tablename__ = 'person_single_tag'
        id = Column(Integer, ForeignKey('person.person_id'), primary_key=True, index=True)
        key = Column(String)
        value = Column(String)
    yield Person_Single_Tag
@pytest.fixture(scope="module")
def string_json_attribute_person_model(base):
    """
    This approach to faking JSON support for testing with sqlite is borrowed from:
    https://avacariu.me/articles/2016/compiling-json-as-text-for-sqlite-with-sqlalchemy
    """
    import sqlalchemy.types as types
    import simplejson as json
    class StringyJSON(types.TypeDecorator):
        """Stores and retrieves JSON as TEXT."""
        impl = types.TEXT
        def process_bind_param(self, value, dialect):
            # serialize Python object to a JSON string on the way in
            if value is not None:
                value = json.dumps(value)
            return value
        def process_result_value(self, value, dialect):
            # deserialize JSON string back to a Python object on the way out
            if value is not None:
                value = json.loads(value)
            return value
    # TypeEngine.with_variant says "use StringyJSON instead when
    # connecting to 'sqlite'"
    try:
        MagicJSON = types.JSON().with_variant(StringyJSON, 'sqlite')
    except AttributeError:
        # older SQLAlchemy without types.JSON: fall back to the postgres JSON type
        from sqlalchemy.dialects.postgresql import JSON
        MagicJSON = JSON().with_variant(StringyJSON, 'sqlite')
    class StringJsonAttributePerson(base):
        __tablename__ = 'string_json_attribute_person'
        person_id = Column(Integer, primary_key=True)
        name = Column(String, nullable=False)
        birth_date = Column(DateTime)
        # This model uses a String type for "json_tags" to avoid dependency on a nonstandard SQL type in testing, \
        # while still demonstrating support
        address = Column(MagicJSON)
    yield StringJsonAttributePerson
@pytest.fixture(scope="module")
def person_model(base):
    # Person with one-to-many computers, nested tags and a single tag.
    class Person(base):
        __tablename__ = 'person'
        person_id = Column(Integer, primary_key=True)
        name = Column(String, nullable=False)
        birth_date = Column(DateTime)
        computers = relationship("Computer", backref="person")
        tags = relationship("Person_Tag", cascade="save-update, merge, delete, delete-orphan")
        single_tag = relationship("Person_Single_Tag", uselist=False, cascade="save-update, merge, delete, delete-orphan")
        computers_owned = relationship("Computer")
    yield Person
@pytest.fixture(scope="module")
def computer_model(base):
    # Computer with an optional owning person (FK person_id).
    class Computer(base):
        __tablename__ = 'computer'
        id = Column(Integer, primary_key=True)
        serial = Column(String, nullable=False)
        person_id = Column(Integer, ForeignKey('person.person_id'))
    yield Computer
@pytest.fixture(scope="module")
def engine(person_tag_model, person_single_tag_model, person_model, computer_model, string_json_attribute_person_model):
    # In-memory SQLite engine with every test table created up front.
    engine = create_engine("sqlite:///:memory:")
    person_tag_model.metadata.create_all(engine)
    person_single_tag_model.metadata.create_all(engine)
    person_model.metadata.create_all(engine)
    computer_model.metadata.create_all(engine)
    string_json_attribute_person_model.metadata.create_all(engine)
    return engine
@pytest.fixture(scope="module")
def session(engine):
    # Single module-scoped session shared by all fixtures and resources.
    Session = sessionmaker(bind=engine)
    return Session()
@pytest.fixture()
def person(session, person_model):
    # Insert one Person row; delete it again on teardown.
    person_ = person_model(name='test')
    session_ = session
    session_.add(person_)
    session_.commit()
    yield person_
    session_.delete(person_)
    session_.commit()
@pytest.fixture()
def person_2(session, person_model):
    # Second Person row ('test2'), cleaned up on teardown.
    person_ = person_model(name='test2')
    session_ = session
    session_.add(person_)
    session_.commit()
    yield person_
    session_.delete(person_)
    session_.commit()
@pytest.fixture()
def computer(session, computer_model):
    # Insert one Computer row (serial '1'); delete it on teardown.
    computer_ = computer_model(serial='1')
    session_ = session
    session_.add(computer_)
    session_.commit()
    yield computer_
    session_.delete(computer_)
    session_.commit()
@pytest.fixture(scope="module")
def dummy_decorator():
    """Yield a no-op decorator that forwards every call to the wrapped callable."""
    def passthrough(func):
        def forwarded(*args, **kwargs):
            return func(*args, **kwargs)
        return forwarded
    yield passthrough
@pytest.fixture(scope="module")
def person_tag_schema():
    # Plain marshmallow schema for nested tag dicts (id is write/read hidden).
    class PersonTagSchema(MarshmallowSchema):
        class Meta:
            type_ = 'person_tag'
        id = fields.Str(dump_only=True, load_only=True)
        key = fields.Str()
        value = fields.Str()
    yield PersonTagSchema
@pytest.fixture(scope="module")
def person_single_tag_schema():
    # Plain marshmallow schema for the single nested tag.
    class PersonSingleTagSchema(MarshmallowSchema):
        class Meta:
            type_ = 'person_single_tag'
        id = fields.Str(dump_only=True, load_only=True)
        key = fields.Str()
        value = fields.Str()
    yield PersonSingleTagSchema
@pytest.fixture(scope="module")
def address_schema():
    # Nested address schema; 'state' defaults to 'NC' when missing.
    class AddressSchema(MarshmallowSchema):
        street = fields.String(required=True)
        city = fields.String(required=True)
        state = fields.String(missing='NC')
        zip = fields.String(required=True)
    yield AddressSchema
@pytest.fixture(scope="module")
def string_json_attribute_person_schema(address_schema):
    # JSON:API schema for the string-JSON person; address is a nested object.
    class StringJsonAttributePersonSchema(Schema):
        class Meta:
            type_ = 'string_json_attribute_person'
            self_view = 'api.string_json_attribute_person_detail'
            self_view_kwargs = {'person_id': '<id>'}
        id = fields.Integer(as_string=True, dump_only=True, attribute='person_id')
        name = fields.Str(required=True)
        birth_date = fields.DateTime()
        address = fields.Nested(address_schema, many=False)
    yield StringJsonAttributePersonSchema
@pytest.fixture(scope="module")
def person_schema(person_tag_schema, person_single_tag_schema):
    # JSON:API schema for Person: computers relationships plus nested tags.
    class PersonSchema(Schema):
        class Meta:
            type_ = 'person'
            self_view = 'api.person_detail'
            self_view_kwargs = {'person_id': '<id>'}
        id = fields.Integer(as_string=True, attribute='person_id')
        name = fields.Str(required=True)
        birth_date = fields.DateTime()
        computers = Relationship(related_view='api.computer_list',
                                 related_view_kwargs={'person_id': '<person_id>'},
                                 schema='ComputerSchema',
                                 type_='computer',
                                 many=True)
        tags = fields.Nested(person_tag_schema, many=True)
        single_tag = fields.Nested(person_single_tag_schema)
        computers_owned = Relationship(related_view='api.computer_list',
                                       related_view_kwargs={'person_id': '<person_id>'},
                                       schema='ComputerSchema',
                                       type_='computer',
                                       many=True)
    yield PersonSchema
@pytest.fixture(scope="module")
def computer_schema():
    # JSON:API schema for Computer; 'owner' maps to the 'person' attribute.
    class ComputerSchema(Schema):
        class Meta:
            type_ = 'computer'
            self_view = 'api.computer_detail'
            self_view_kwargs = {'id': '<id>'}
        id = fields.Integer(as_string=True, dump_only=True)
        serial = fields.Str(required=True)
        owner = Relationship(attribute='person',
                             default=None,
                             missing=None,
                             related_view='api.person_detail',
                             related_view_kwargs={'person_id': '<person.person_id>'},
                             schema='PersonSchema',
                             id_field='person_id',
                             type_='person')
    yield ComputerSchema
@pytest.fixture(scope="module")
def before_create_object():
    # No-op data-layer hook used to exercise the 'methods' plumbing.
    def before_create_object_(self, data, view_kwargs):
        pass
    yield before_create_object_
@pytest.fixture(scope="module")
def before_update_object():
    # No-op hook invoked before an object is patched.
    def before_update_object_(self, obj, data, view_kwargs):
        pass
    yield before_update_object_
@pytest.fixture(scope="module")
def before_delete_object():
    # No-op hook invoked before an object is deleted.
    def before_delete_object_(self, obj, view_kwargs):
        pass
    yield before_delete_object_
@pytest.fixture(scope="module")
def person_list(session, person_model, dummy_decorator, person_schema, before_create_object):
    # ResourceList for Person, wired with the before_create_object hook.
    class PersonList(ResourceList):
        schema = person_schema
        data_layer = {'model': person_model,
                      'session': session,
                      # fixed typo: key was 'mzthods', which silently dropped the hook
                      'methods': {'before_create_object': before_create_object}}
        get_decorators = [dummy_decorator]
        post_decorators = [dummy_decorator]
        get_schema_kwargs = dict()
        post_schema_kwargs = dict()
    yield PersonList
@pytest.fixture(scope="module")
def person_detail(session, person_model, dummy_decorator, person_schema, before_update_object, before_delete_object):
    # ResourceDetail for Person; url_field maps the URL kwarg to person_id.
    class PersonDetail(ResourceDetail):
        schema = person_schema
        data_layer = {'model': person_model,
                      'session': session,
                      'url_field': 'person_id',
                      'methods': {'before_update_object': before_update_object,
                                  'before_delete_object': before_delete_object}}
        get_decorators = [dummy_decorator]
        patch_decorators = [dummy_decorator]
        delete_decorators = [dummy_decorator]
        get_schema_kwargs = dict()
        patch_schema_kwargs = dict()
        delete_schema_kwargs = dict()
    yield PersonDetail
@pytest.fixture(scope="module")
def person_computers(session, person_model, dummy_decorator, person_schema):
    # Relationship resource for person <-> computers.
    class PersonComputersRelationship(ResourceRelationship):
        schema = person_schema
        data_layer = {'session': session,
                      'model': person_model,
                      'url_field': 'person_id'}
        get_decorators = [dummy_decorator]
        post_decorators = [dummy_decorator]
        patch_decorators = [dummy_decorator]
        delete_decorators = [dummy_decorator]
    yield PersonComputersRelationship
@pytest.fixture(scope="module")
def person_list_raise_jsonapiexception():
    # Resource whose GET always raises JsonApiException (error-path test).
    class PersonList(ResourceList):
        def get(self):
            raise JsonApiException('', '')
    yield PersonList
@pytest.fixture(scope="module")
def person_list_raise_exception():
    # Resource whose GET raises a plain Exception (500 error-path test).
    class PersonList(ResourceList):
        def get(self):
            raise Exception()
    yield PersonList
@pytest.fixture(scope="module")
def person_list_response():
    # Resource whose GET returns a raw Flask response unchanged.
    class PersonList(ResourceList):
        def get(self):
            return make_response('')
    yield PersonList
@pytest.fixture(scope="module")
def person_list_without_schema(session, person_model):
    # Resource missing the required 'schema' attribute (500 error-path test).
    class PersonList(ResourceList):
        data_layer = {'model': person_model,
                      'session': session}
        def get(self):
            return make_response('')
    yield PersonList
@pytest.fixture(scope="module")
def query(computer_model, person_model):
    """Custom data-layer query: optionally scope computers to one person.

    The model fixtures are requested explicitly so that ``computer_model``
    and ``person_model`` inside the closure are the mapped classes; without
    them the names would resolve to the module-level fixture *functions*,
    which SQLAlchemy cannot query.
    """
    def query_(self, view_kwargs):
        if view_kwargs.get('person_id') is not None:
            return self.session.query(computer_model).join(person_model).filter_by(person_id=view_kwargs['person_id'])
        return self.session.query(computer_model)
    yield query_
@pytest.fixture(scope="module")
def computer_list(session, computer_model, computer_schema, query):
    # ResourceList for Computer, using the custom 'query' method above.
    class ComputerList(ResourceList):
        schema = computer_schema
        data_layer = {'model': computer_model,
                      'session': session,
                      'methods': {'query': query}}
    yield ComputerList
@pytest.fixture(scope="module")
def computer_detail(session, computer_model, dummy_decorator, computer_schema):
    # ResourceDetail for Computer; DELETE is intentionally not exposed.
    class ComputerDetail(ResourceDetail):
        schema = computer_schema
        data_layer = {'model': computer_model,
                      'session': session}
        methods = ['GET', 'PATCH']
    yield ComputerDetail
@pytest.fixture(scope="module")
def computer_owner(session, computer_model, dummy_decorator, computer_schema):
    # Relationship resource for computer -> owner (person).
    class ComputerOwnerRelationship(ResourceRelationship):
        schema = computer_schema
        data_layer = {'session': session,
                      'model': computer_model}
    yield ComputerOwnerRelationship
@pytest.fixture(scope="module")
def string_json_attribute_person_detail(session, string_json_attribute_person_model, string_json_attribute_person_schema):
    # Detail resource for the JSON-attribute person model.
    class StringJsonAttributePersonDetail(ResourceDetail):
        schema = string_json_attribute_person_schema
        data_layer = {'session': session,
                      'model': string_json_attribute_person_model}
    yield StringJsonAttributePersonDetail
@pytest.fixture(scope="module")
def string_json_attribute_person_list(session, string_json_attribute_person_model, string_json_attribute_person_schema):
    # List resource for the JSON-attribute person model.
    class StringJsonAttributePersonList(ResourceList):
        schema = string_json_attribute_person_schema
        data_layer = {'session': session,
                      'model': string_json_attribute_person_model}
    yield StringJsonAttributePersonList
@pytest.fixture(scope="module")
def api_blueprint(client):
    # Blueprint named 'api' so schema self_view names like 'api.person_detail' resolve.
    bp = Blueprint('api', __name__)
    yield bp
@pytest.fixture(scope="module")
def register_routes(client, app, api_blueprint, person_list, person_detail, person_computers,
                    person_list_raise_jsonapiexception, person_list_raise_exception, person_list_response,
                    person_list_without_schema, computer_list, computer_detail, computer_owner,
                    string_json_attribute_person_detail, string_json_attribute_person_list):
    # Register every resource on the blueprint and attach it to the app once
    # per module; tests depend on this fixture for their URL routes.
    api = Api(blueprint=api_blueprint)
    api.route(person_list, 'person_list', '/persons')
    api.route(person_detail, 'person_detail', '/persons/<int:person_id>')
    api.route(person_computers, 'person_computers', '/persons/<int:person_id>/relationships/computers')
    api.route(person_computers, 'person_computers_owned', '/persons/<int:person_id>/relationships/computers-owned')
    api.route(person_computers, 'person_computers_error', '/persons/<int:person_id>/relationships/computer')
    api.route(person_list_raise_jsonapiexception, 'person_list_jsonapiexception', '/persons_jsonapiexception')
    api.route(person_list_raise_exception, 'person_list_exception', '/persons_exception')
    api.route(person_list_response, 'person_list_response', '/persons_response')
    api.route(person_list_without_schema, 'person_list_without_schema', '/persons_without_schema')
    api.route(computer_list, 'computer_list', '/computers', '/persons/<int:person_id>/computers')
    api.route(computer_list, 'computer_detail', '/computers/<int:id>')
    api.route(computer_owner, 'computer_owner', '/computers/<int:id>/relationships/owner')
    api.route(string_json_attribute_person_list, 'string_json_attribute_person_list', '/string_json_attribute_persons')
    api.route(string_json_attribute_person_detail, 'string_json_attribute_person_detail',
              '/string_json_attribute_persons/<int:person_id>')
    api.init_app(app)
@pytest.fixture(scope="module")
def get_object_mock():
    # Stub standing in for a data-layer object: exposes the attribute chain
    # foo.property.mapper.class_ == 'test' built from anonymous type() calls.
    class get_object(object):
        foo = type('foo', (object,), {
            'property': type('prop', (object,), {
                'mapper': type('map', (object,), {
                    'class_': 'test'
                })()
            })()
        })()
        def __init__(self, kwargs):
            pass
    return get_object
def test_add_pagination_links(app):
    # 43 items at page size 10 -> the 'last' link must point to page 5.
    with app.app_context():
        qs = {'page[number]': '2', 'page[size]': '10'}
        qsm = QSManager(qs, None)
        pagination_dict = dict()
        add_pagination_links(pagination_dict, 43, qsm, str())
        last_page_dict = parse_qs(pagination_dict['links']['last'][1:])
        assert len(last_page_dict['page[number]']) == 1
        assert last_page_dict['page[number]'][0] == '5'
def test_Node(person_model, person_schema, monkeypatch):
    # Exercise Node's error paths: resolve() on a malformed filter, missing
    # model, empty operator, and filters that are not relationships.
    from copy import deepcopy
    filt = {
        'val': '0000',
        'field': True,
        'not': dict(),
        'name': 'name',
        'op': 'eq',
        'strip': lambda: 's'
    }
    filt['not'] = deepcopy(filt)
    del filt['not']['not']
    n = Node(person_model,
             filt,
             None,
             person_schema)
    with pytest.raises(TypeError):
        # print(n.val is None and n.field is None)
        # # n.column
        n.resolve()
    with pytest.raises(AttributeError):
        n.model = None
        n.column
    with pytest.raises(InvalidFilters):
        n.model = person_model
        n.filter_['op'] = ''
        n.operator
    with pytest.raises(InvalidFilters):
        n.related_model
    with pytest.raises(InvalidFilters):
        n.related_schema
def test_check_method_requirements(monkeypatch):
    # A resource without a 'get' attribute must be rejected for GET requests.
    self = type('self', (object,), dict())
    request = type('request', (object,), dict(method='GET'))
    monkeypatch.setattr(flask_rest_jsonapi.decorators, 'request', request)
    with pytest.raises(Exception):
        flask_rest_jsonapi.decorators.check_method_requirements(lambda: 1)(self())
def test_json_api_exception():
    # Constructor accepts explicit title/status keyword overrides.
    JsonApiException(None, None, title='test', status='test')
def test_query_string_manager(person_schema):
    # Malformed pagination key -> BadRequest; sorting on a relationship -> InvalidSort.
    query_string = {'page[slumber]': '3'}
    qsm = QSManager(query_string, person_schema)
    with pytest.raises(BadRequest):
        qsm.pagination
    qsm.qs['sort'] = 'computers'
    with pytest.raises(InvalidSort):
        qsm.sorting
def test_resource(app, person_model, person_schema, session, monkeypatch):
    # Drive ResourceList/ResourceDetail directly with a mocked request whose
    # schema.load raises ValidationError, covering the error formatting path.
    def schema_load_mock(*args):
        raise ValidationError(dict(errors=[dict(status=None, title=None)]))
    with app.app_context():
        query_string = {'page[slumber]': '3'}
        app = type('app', (object,), dict(config=dict(DEBUG=True)))
        headers = {'Content-Type': 'application/vnd.api+json'}
        request = type('request', (object,), dict(method='POST',
                                                  headers=headers,
                                                  get_json=dict,
                                                  args=query_string))
        dl = SqlalchemyDataLayer(dict(session=session, model=person_model))
        rl = ResourceList()
        rd = ResourceDetail()
        rl._data_layer = dl
        rl.schema = person_schema
        rd._data_layer = dl
        rd.schema = person_schema
        monkeypatch.setattr(flask_rest_jsonapi.resource, 'request', request)
        monkeypatch.setattr(flask_rest_jsonapi.decorators, 'current_app', app)
        monkeypatch.setattr(flask_rest_jsonapi.decorators, 'request', request)
        monkeypatch.setattr(rl.schema, 'load', schema_load_mock)
        r = super(flask_rest_jsonapi.resource.Resource, ResourceList)\
            .__new__(ResourceList)
        with pytest.raises(Exception):
            r.dispatch_request()
        rl.post()
        rd.patch()
def test_compute_schema(person_schema):
    # Invalid include -> InvalidInclude; empty 'only' list must be accepted.
    query_string = {'page[number]': '3', 'fields[person]': list()}
    qsm = QSManager(query_string, person_schema)
    with pytest.raises(InvalidInclude):
        flask_rest_jsonapi.schema.compute_schema(person_schema, dict(), qsm, ['id'])
    flask_rest_jsonapi.schema.compute_schema(person_schema, dict(only=list()), qsm, list())
def test_compute_schema_propagate_context(person_schema, computer_schema):
    # The parent schema's context must propagate into included relationship schemas.
    query_string = {}
    qsm = QSManager(query_string, person_schema)
    schema = flask_rest_jsonapi.schema.compute_schema(person_schema, dict(), qsm, ['computers'])
    assert schema.declared_fields['computers'].__dict__['_Relationship__schema'].__dict__['context'] == dict()
    schema = flask_rest_jsonapi.schema.compute_schema(person_schema, dict(context=dict(foo='bar')), qsm, ['computers'])
    assert schema.declared_fields['computers'].__dict__['_Relationship__schema'].__dict__['context'] == dict(foo='bar')
# test good cases
def test_get_list(client, register_routes, person, person_2):
    # Full-featured GET list: pagination, sparse fields, sort, include and a
    # nested and/or/any filter tree.
    with client:
        querystring = urlencode({'page[number]': 1,
                                 'page[size]': 1,
                                 'fields[person]': 'name,birth_date',
                                 'sort': '-name',
                                 'include': 'computers.owner',
                                 'filter': json.dumps(
                                     [
                                         {
                                             'and': [
                                                 {
                                                     'name': 'computers',
                                                     'op': 'any',
                                                     'val': {
                                                         'name': 'serial',
                                                         'op': 'eq',
                                                         'val': '0000'
                                                     }
                                                 },
                                                 {
                                                     'or': [
                                                         {
                                                             'name': 'name',
                                                             'op': 'like',
                                                             'val': '%test%'
                                                         },
                                                         {
                                                             'name': 'name',
                                                             'op': 'like',
                                                             'val': '%test2%'
                                                         }
                                                     ]
                                                 }
                                             ]
                                         }
                                     ])})
        response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_list_with_simple_filter(client, register_routes, person, person_2):
    # GET list with the shorthand filter[name]=... syntax.
    with client:
        querystring = urlencode({'page[number]': 1,
                                 'page[size]': 1,
                                 'fields[person]': 'name,birth_date',
                                 'sort': '-name',
                                 'filter[name]': 'test'
                                 })
        response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_list_disable_pagination(client, register_routes):
    # page[size]=0 disables pagination entirely.
    with client:
        querystring = urlencode({'page[size]': 0})
        response = client.get('/persons' + '?' + querystring, content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_head_list(client, register_routes):
    # HEAD on a list endpoint is allowed and returns 200.
    with client:
        response = client.head('/persons', content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_post_list(client, register_routes, computer):
    # POST a person with a to-many computers relationship -> 201.
    payload = {
        'data': {
            'type': 'person',
            'attributes': {
                'name': 'test'
            },
            'relationships': {
                'computers': {
                    'data': [
                        {
                            'type': 'computer',
                            'id': str(computer.id)
                        }
                    ]
                }
            }
        }
    }
    with client:
        response = client.post('/persons', data=json.dumps(payload), content_type='application/vnd.api+json')
        assert response.status_code == 201
def test_post_list_nested_no_join(client, register_routes, computer):
    # POST with a nested JSON attribute (address) on the string-JSON model.
    payload = {
        'data': {
            'type': 'string_json_attribute_person',
            'attributes': {
                'name': 'test_name',
                'address': {
                    'street': 'test_street',
                    'city': 'test_city',
                    'state': 'NC',
                    'zip': '00000'
                }
            }
        }
    }
    with client:
        response = client.post('/string_json_attribute_persons', data=json.dumps(payload), content_type='application/vnd.api+json')
        print(response.get_data())
        assert response.status_code == 201
        assert json.loads(response.get_data())['data']['attributes']['address']['street'] == 'test_street'
def test_post_list_nested(client, register_routes, computer):
    # POST a person with nested tag rows plus a relationship -> tags persisted.
    payload = {
        'data': {
            'type': 'person',
            'attributes': {
                'name': 'test',
                'tags': [
                    {'key': 'k1', 'value': 'v1'},
                    {'key': 'k2', 'value': 'v2'}
                ]
            },
            'relationships': {
                'computers': {
                    'data': [
                        {
                            'type': 'computer',
                            'id': str(computer.id)
                        }
                    ]
                }
            }
        }
    }
    with client:
        response = client.post('/persons', data=json.dumps(payload), content_type='application/vnd.api+json')
        assert response.status_code == 201
        assert json.loads(response.get_data())['data']['attributes']['tags'][0]['key'] == 'k1'
def test_post_list_single(client, register_routes, person):
    # POST a computer with a to-one owner relationship -> 201.
    payload = {
        'data': {
            'type': 'computer',
            'attributes': {
                'serial': '1'
            },
            'relationships': {
                'owner': {
                    'data': {
                        'type': 'person',
                        'id': str(person.person_id)
                    }
                }
            }
        }
    }
    with client:
        response = client.post('/computers', data=json.dumps(payload), content_type='application/vnd.api+json')
        assert response.status_code == 201
def test_get_detail(client, register_routes, person):
    # GET a single person by id -> 200.
    with client:
        response = client.get('/persons/' + str(person.person_id), content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_patch_detail(client, register_routes, computer, person):
    # PATCH attributes and relationships of an existing person -> 200.
    payload = {
        'data': {
            'id': str(person.person_id),
            'type': 'person',
            'attributes': {
                'name': 'test2'
            },
            'relationships': {
                'computers': {
                    'data': [
                        {
                            'type': 'computer',
                            'id': str(computer.id)
                        }
                    ]
                }
            }
        }
    }
    with client:
        response = client.patch('/persons/' + str(person.person_id),
                                data=json.dumps(payload),
                                content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_patch_detail_nested(client, register_routes, computer, person):
    # PATCH replaces the nested tags list and the single_tag object.
    payload = {
        'data': {
            'id': str(person.person_id),
            'type': 'person',
            'attributes': {
                'name': 'test2',
                'tags': [
                    {'key': 'new_key', 'value': 'new_value'}
                ],
                'single_tag': {'key': 'new_single_key', 'value': 'new_single_value'}
            },
            'relationships': {
                'computers': {
                    'data': [
                        {
                            'type': 'computer',
                            'id': str(computer.id)
                        }
                    ]
                }
            }
        }
    }
    with client:
        response = client.patch('/persons/' + str(person.person_id),
                                data=json.dumps(payload),
                                content_type='application/vnd.api+json')
        assert response.status_code == 200
        response_dict = json.loads(response.get_data())
        assert response_dict['data']['attributes']['tags'][0]['key'] == 'new_key'
        assert response_dict['data']['attributes']['single_tag']['key'] == 'new_single_key'
def test_delete_detail(client, register_routes, person):
    # DELETE an existing person -> 200.
    with client:
        response = client.delete('/persons/' + str(person.person_id), content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_relationship(session, client, register_routes, computer, person):
    # GET a populated to-many relationship -> 200.
    session_ = session
    person.computers = [computer]
    session_.commit()
    with client:
        response = client.get('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
                              content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_relationship_empty(client, register_routes, person):
    # GET an empty to-many relationship still returns 200.
    with client:
        response = client.get('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
                              content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_relationship_single(session, client, register_routes, computer, person):
    # GET a populated to-one relationship (computer -> owner) -> 200.
    session_ = session
    computer.person = person
    session_.commit()
    with client:
        response = client.get('/computers/' + str(computer.id) + '/relationships/owner',
                              content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_relationship_single_empty(session, client, register_routes, computer):
    # Empty to-one relationship serializes as data: null.
    with client:
        response = client.get('/computers/' + str(computer.id) + '/relationships/owner',
                              content_type='application/vnd.api+json')
        response_json = json.loads(response.get_data())
        assert None is response_json['data']
        assert response.status_code == 200
def test_issue_49(session, client, register_routes, person, person_2):
    # Regression: 'related' link must reference each person's own id.
    with client:
        for p in [person, person_2]:
            response = client.get('/persons/' + str(p.person_id) + '/relationships/computers?include=computers',
                                  content_type='application/vnd.api+json')
            assert response.status_code == 200
            assert (json.loads(response.get_data()))['links']['related'] == '/persons/' + str(p.person_id) + '/computers'
def test_post_relationship(client, register_routes, computer, person):
    # POST adds a computer to the to-many relationship -> 200.
    payload = {
        'data': [
            {
                'type': 'computer',
                'id': str(computer.id)
            }
        ]
    }
    with client:
        response = client.post('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
                               data=json.dumps(payload),
                               content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_post_relationship_not_list(client, register_routes, computer, person):
    # POST to a to-one relationship takes a single resource object, not a list.
    payload = {
        'data': {
            'type': 'person',
            'id': str(person.person_id)
        }
    }
    with client:
        response = client.post('/computers/' + str(computer.id) + '/relationships/owner',
                               data=json.dumps(payload),
                               content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_patch_relationship(client, register_routes, computer, person):
    # PATCH replaces the full to-many relationship -> 200.
    payload = {
        'data': [
            {
                'type': 'computer',
                'id': str(computer.id)
            }
        ]
    }
    with client:
        response = client.patch('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
                                data=json.dumps(payload),
                                content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_patch_relationship_single(client, register_routes, computer, person):
    # PATCH replaces the to-one owner relationship -> 200.
    payload = {
        'data': {
            'type': 'person',
            'id': str(person.person_id)
        }
    }
    with client:
        response = client.patch('/computers/' + str(computer.id) + '/relationships/owner',
                                data=json.dumps(payload),
                                content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_delete_relationship(session, client, register_routes, computer, person):
    # DELETE removes a computer from the to-many relationship -> 200.
    session_ = session
    person.computers = [computer]
    session_.commit()
    payload = {
        'data': [
            {
                'type': 'computer',
                'id': str(computer.id)
            }
        ]
    }
    with client:
        response = client.delete('/persons/' + str(person.person_id) + '/relationships/computers?include=computers',
                                 data=json.dumps(payload),
                                 content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_delete_relationship_single(session, client, register_routes, computer, person):
    # DELETE clears the to-one owner relationship -> 200.
    session_ = session
    computer.person = person
    session_.commit()
    payload = {
        'data': {
            'type': 'person',
            'id': str(person.person_id)
        }
    }
    with client:
        response = client.delete('/computers/' + str(computer.id) + '/relationships/owner',
                                 data=json.dumps(payload),
                                 content_type='application/vnd.api+json')
        assert response.status_code == 200
def test_get_list_response(client, register_routes, person):
    # A resource returning a raw Flask response passes straight through.
    with client:
        response = client.get('/persons_response', content_type='application/vnd.api+json')
        assert response.status_code == 200
# test various Accept headers
def test_single_accept_header(client, register_routes):
    # Plain JSON:API Accept header is accepted.
    with client:
        response = client.get(
            '/persons',
            content_type='application/vnd.api+json',
            headers={'Accept': 'application/vnd.api+json'}
        )
        assert response.status_code == 200
def test_multiple_accept_header(client, register_routes):
    # At least one parameter-free JSON:API media type -> accepted.
    with client:
        response = client.get(
            '/persons',
            content_type='application/vnd.api+json',
            headers={'Accept': '*/*, application/vnd.api+json, application/vnd.api+json;q=0.9'}
        )
        assert response.status_code == 200
def test_wrong_accept_header(client, register_routes):
    # Every JSON:API media type carries parameters -> 406 Not Acceptable.
    with client:
        response = client.get(
            '/persons',
            content_type='application/vnd.api+json',
            headers={'Accept': 'application/vnd.api+json;q=0.7, application/vnd.api+json;q=0.9'}
        )
        assert response.status_code == 406
# test Content-Type error
def test_wrong_content_type(client, register_routes):
    # Content-Type with media type parameters -> 415 Unsupported Media Type.
    with client:
        response = client.post('/persons', headers={'Content-Type': 'application/vnd.api+json;q=0.8'})
        assert response.status_code == 415
@pytest.fixture(scope="module")
def wrong_data_layer():
    """Yield a class that does not inherit from BaseDataLayer."""
    class NotADataLayer(object):
        pass
    yield NotADataLayer


def test_wrong_data_layer_inheritence(wrong_data_layer):
    """A data layer class outside the BaseDataLayer hierarchy raises."""
    with pytest.raises(Exception):
        class PersonDetail(ResourceDetail):
            data_layer = {'class': wrong_data_layer}
        PersonDetail()


def test_wrong_data_layer_kwargs_type():
    """data_layer must be a dict; any other type raises at construction."""
    with pytest.raises(Exception):
        class PersonDetail(ResourceDetail):
            data_layer = list()
        PersonDetail()
def test_get_list_jsonapiexception(client, register_routes):
    """A view raising JsonApiException surfaces as a 500 response."""
    with client:
        rv = client.get('/persons_jsonapiexception',
                        content_type='application/vnd.api+json')
    assert rv.status_code == 500


def test_get_list_exception(client, register_routes):
    """An arbitrary exception in a view surfaces as a 500 response."""
    with client:
        rv = client.get('/persons_exception',
                        content_type='application/vnd.api+json')
    assert rv.status_code == 500


def test_get_list_without_schema(client, register_routes):
    """POSTing to a resource registered without a schema fails with 500."""
    with client:
        rv = client.post('/persons_without_schema',
                         content_type='application/vnd.api+json')
    assert rv.status_code == 500
def test_get_list_bad_request(client, register_routes):
    """A malformed querystring key ('page[number') is a 400 Bad Request."""
    qs = urlencode({'page[number': 3})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_invalid_fields(client, register_routes):
    """Requesting a sparse fieldset with an unknown field returns 400."""
    qs = urlencode({'fields[person]': 'error'})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_invalid_include(client, register_routes):
    """Including an unknown relationship name returns 400."""
    qs = urlencode({'include': 'error'})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_invalid_filters_parsing(client, register_routes):
    """A filter value that is not valid JSON returns 400."""
    qs = urlencode({'filter': 'error'})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_invalid_page(client, register_routes):
    """A non-numeric page[number] returns 400."""
    qs = urlencode({'page[number]': 'error'})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_invalid_sort(client, register_routes):
    """Sorting on an unknown field returns 400."""
    qs = urlencode({'sort': 'error'})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400
def test_get_detail_object_not_found(client, register_routes):
    """GET of an unknown id answers 200 for this resource configuration."""
    with client:
        rv = client.get('/persons/3', content_type='application/vnd.api+json')
    assert rv.status_code == 200


def test_post_relationship_related_object_not_found(client, register_routes, person):
    """Linking a non-existent computer id returns 404."""
    body = {'data': [{'type': 'computer', 'id': '2'}]}
    with client:
        rv = client.post(
            '/persons/{}/relationships/computers'.format(person.person_id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 404


def test_get_relationship_relationship_field_not_found(client, register_routes, person):
    """An unknown relationship name ('computer') is a server error (500)."""
    with client:
        rv = client.get(
            '/persons/{}/relationships/computer'.format(person.person_id),
            content_type='application/vnd.api+json')
    assert rv.status_code == 500


def test_get_list_invalid_filters_val(client, register_routes):
    """A filter entry missing its 'val' member is rejected with 400."""
    qs = urlencode({'filter': json.dumps([{'name': 'computers', 'op': 'any'}])})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400
def test_get_list_name(client, register_routes):
    """Filtering through a relationship ('computers__serial') succeeds."""
    flt = [{'name': 'computers__serial', 'op': 'ilike', 'val': '%1%'}]
    qs = urlencode({'filter': json.dumps(flt)})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 200


def test_get_list_no_name(client, register_routes):
    """A filter entry without a 'name' member is rejected with 400."""
    qs = urlencode({'filter': json.dumps([{'op': 'any', 'val': '1'}])})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_no_op(client, register_routes):
    """A filter entry without an 'op' member is a server error (500)."""
    flt = [{'name': 'computers__serial', 'val': '1'}]
    qs = urlencode({'filter': json.dumps(flt)})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 500


def test_get_list_attr_error(client, register_routes):
    """Filtering on an unknown attribute name returns 400."""
    qs = urlencode({'filter': json.dumps([{'name': 'error', 'op': 'eq', 'val': '1'}])})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_get_list_field_error(client, register_routes):
    """Filtering against an unknown comparison field returns 400."""
    qs = urlencode({'filter': json.dumps([{'name': 'name', 'op': 'eq', 'field': 'error'}])})
    with client:
        rv = client.get('/persons?' + qs,
                        content_type='application/vnd.api+json')
    assert rv.status_code == 400
def test_sqlalchemy_data_layer_without_session(person_model, person_list):
    """Omitting 'session' from the data layer options raises."""
    with pytest.raises(Exception):
        SqlalchemyDataLayer(dict(model=person_model, resource=person_list))


def test_sqlalchemy_data_layer_without_model(session, person_list):
    """Omitting 'model' from the data layer options raises."""
    with pytest.raises(Exception):
        SqlalchemyDataLayer(dict(session=session, resource=person_list))


def test_sqlalchemy_data_layer_create_object_error(session, person_model, person_list):
    """create_object with empty data/view kwargs raises JsonApiException."""
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(
            dict(session=session, model=person_model, resource=person_list))
        layer.create_object(dict(), dict())


def test_sqlalchemy_data_layer_get_object_error(session, person_model):
    """get_object with a bogus id_field raises."""
    with pytest.raises(Exception):
        layer = SqlalchemyDataLayer(
            dict(session=session, model=person_model, id_field='error'))
        layer.get_object(dict())
def test_sqlalchemy_data_layer_update_object_error(session, person_model, person_list, monkeypatch):
    """A commit failure during update_object propagates JsonApiException."""
    def failing_commit():
        raise JsonApiException()
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(
            dict(session=session, model=person_model, resource=person_list))
        monkeypatch.setattr(layer.session, 'commit', failing_commit)
        layer.update_object(dict(), dict(), dict())


def test_sqlalchemy_data_layer_delete_object_error(session, person_model, person_list, monkeypatch):
    """A commit failure during delete_object propagates JsonApiException."""
    def failing_commit():
        raise JsonApiException()

    def noop_delete(obj):
        pass
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(
            dict(session=session, model=person_model, resource=person_list))
        monkeypatch.setattr(layer.session, 'commit', failing_commit)
        monkeypatch.setattr(layer.session, 'delete', noop_delete)
        layer.delete_object(dict(), dict())


def test_sqlalchemy_data_layer_create_relationship_field_not_found(session, person_model):
    """create_relationship on an unknown field raises."""
    with pytest.raises(Exception):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        layer.create_relationship(dict(), 'error', '', dict(id=1))


def test_sqlalchemy_data_layer_create_relationship_error(session, person_model, get_object_mock, monkeypatch):
    """A commit failure during create_relationship raises JsonApiException."""
    def failing_commit():
        raise JsonApiException()
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        monkeypatch.setattr(layer.session, 'commit', failing_commit)
        monkeypatch.setattr(layer, 'get_object', get_object_mock)
        layer.create_relationship(dict(data=None), 'foo', '', dict(id=1))


def test_sqlalchemy_data_layer_get_relationship_field_not_found(session, person_model):
    """get_relationship on an unknown field raises RelationNotFound."""
    with pytest.raises(RelationNotFound):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        layer.get_relationship('error', '', '', dict(id=1))
def test_sqlalchemy_data_layer_update_relationship_field_not_found(session, person_model):
    """update_relationship on an unknown field raises."""
    with pytest.raises(Exception):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        layer.update_relationship(dict(), 'error', '', dict(id=1))


def test_sqlalchemy_data_layer_update_relationship_error(session, person_model, get_object_mock, monkeypatch):
    """A commit failure during update_relationship raises JsonApiException."""
    def failing_commit():
        raise JsonApiException()
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        monkeypatch.setattr(layer.session, 'commit', failing_commit)
        monkeypatch.setattr(layer, 'get_object', get_object_mock)
        layer.update_relationship(dict(data=None), 'foo', '', dict(id=1))


def test_sqlalchemy_data_layer_delete_relationship_field_not_found(session, person_model):
    """delete_relationship on an unknown field raises."""
    with pytest.raises(Exception):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        layer.delete_relationship(dict(), 'error', '', dict(id=1))


def test_sqlalchemy_data_layer_delete_relationship_error(session, person_model, get_object_mock, monkeypatch):
    """A commit failure during delete_relationship raises JsonApiException."""
    def failing_commit():
        raise JsonApiException()
    with pytest.raises(JsonApiException):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        monkeypatch.setattr(layer.session, 'commit', failing_commit)
        monkeypatch.setattr(layer, 'get_object', get_object_mock)
        layer.delete_relationship(dict(data=None), 'foo', '', dict(id=1))


def test_sqlalchemy_data_layer_sort_query_error(session, person_model, monkeypatch):
    """Sorting on a field the model lacks raises InvalidSort."""
    with pytest.raises(InvalidSort):
        layer = SqlalchemyDataLayer(dict(session=session, model=person_model))
        layer.sort_query(None, [dict(field='test')])
def test_post_list_incorrect_type(client, register_routes, computer):
    """POSTing a document whose 'type' does not match yields 409 Conflict."""
    body = {
        'data': {
            'type': 'error',
            'attributes': {'name': 'test'},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.post('/persons', data=json.dumps(body),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 409


def test_post_list_validation_error(client, register_routes, computer):
    """POSTing without required attributes fails schema validation (422)."""
    body = {
        'data': {
            'type': 'person',
            'attributes': {},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.post('/persons', data=json.dumps(body),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 422
def test_patch_detail_incorrect_type(client, register_routes, computer, person):
    """PATCH with a mismatched resource type is rejected with 409 Conflict."""
    body = {
        'data': {
            'id': str(person.person_id),
            'type': 'error',
            'attributes': {'name': 'test2'},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.patch('/persons/{}'.format(person.person_id),
                          data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 409


def test_patch_detail_validation_error(client, register_routes, computer, person):
    """PATCH with an attribute of the wrong shape fails validation (422)."""
    body = {
        'data': {
            'id': str(person.person_id),
            'type': 'person',
            'attributes': {'name': {'test2': 'error'}},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.patch('/persons/{}'.format(person.person_id),
                          data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 422
def test_patch_detail_missing_id(client, register_routes, computer, person):
    """PATCH without the mandatory 'id' member is a 400 Bad Request."""
    body = {
        'data': {
            'type': 'person',
            'attributes': {'name': 'test2'},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.patch('/persons/{}'.format(person.person_id),
                          data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_detail_wrong_id(client, register_routes, computer, person):
    """PATCH whose body id disagrees with the URL id is rejected (422)."""
    body = {
        'data': {
            'id': 'error',
            'type': 'person',
            'attributes': {'name': 'test2'},
            'relationships': {
                'computers': {
                    'data': [{'type': 'computer', 'id': str(computer.id)}]
                }
            }
        }
    }
    with client:
        rv = client.patch('/persons/{}'.format(person.person_id),
                          data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 422
def test_post_relationship_no_data(client, register_routes, computer, person):
    """POST to a relationship without a 'data' member is a 400."""
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.post(url, data=json.dumps(dict()),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_post_relationship_not_list_missing_type(client, register_routes, computer, person):
    """A to-one linkage object without 'type' is a 400."""
    body = {'data': {'id': str(person.person_id)}}
    with client:
        rv = client.post(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_post_relationship_not_list_missing_id(client, register_routes, computer, person):
    """A to-one linkage object without 'id' is a 400."""
    body = {'data': {'type': 'person'}}
    with client:
        rv = client.post(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_post_relationship_not_list_wrong_type(client, register_routes, computer, person):
    """A to-one linkage object with the wrong 'type' is a 409 Conflict."""
    body = {'data': {'type': 'error', 'id': str(person.person_id)}}
    with client:
        rv = client.post(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_post_relationship_missing_type(client, register_routes, computer, person):
    """A to-many linkage entry without 'type' is a 400."""
    body = {'data': [{'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.post(url, data=json.dumps(body),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_post_relationship_missing_id(client, register_routes, computer, person):
    """A to-many linkage entry without 'id' is a 400."""
    body = {'data': [{'type': 'computer'}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.post(url, data=json.dumps(body),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_post_relationship_wrong_type(client, register_routes, computer, person):
    """A to-many linkage entry with the wrong 'type' is a 409 Conflict."""
    body = {'data': [{'type': 'error', 'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.post(url, data=json.dumps(body),
                         content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_patch_relationship_no_data(client, register_routes, computer, person):
    """PATCH to a relationship without a 'data' member is a 400."""
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.patch(url, data=json.dumps(dict()),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_relationship_not_list_missing_type(client, register_routes, computer, person):
    """PATCH of a to-one linkage object without 'type' is a 400."""
    body = {'data': {'id': str(person.person_id)}}
    with client:
        rv = client.patch(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_relationship_not_list_missing_id(client, register_routes, computer, person):
    """PATCH of a to-one linkage object without 'id' is a 400."""
    body = {'data': {'type': 'person'}}
    with client:
        rv = client.patch(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_relationship_not_list_wrong_type(client, register_routes, computer, person):
    """PATCH of a to-one linkage with the wrong 'type' is a 409 Conflict."""
    body = {'data': {'type': 'error', 'id': str(person.person_id)}}
    with client:
        rv = client.patch(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_patch_relationship_missing_type(client, register_routes, computer, person):
    """PATCH of a to-many linkage entry without 'type' is a 400."""
    body = {'data': [{'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.patch(url, data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_relationship_missing_id(client, register_routes, computer, person):
    """PATCH of a to-many linkage entry without 'id' is a 400."""
    body = {'data': [{'type': 'computer'}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.patch(url, data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_patch_relationship_wrong_type(client, register_routes, computer, person):
    """PATCH of a to-many linkage with the wrong 'type' is a 409 Conflict."""
    body = {'data': [{'type': 'error', 'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.patch(url, data=json.dumps(body),
                          content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_delete_relationship_no_data(client, register_routes, computer, person):
    """DELETE on a relationship without a 'data' member is a 400."""
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.delete(url, data=json.dumps(dict()),
                           content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_delete_relationship_not_list_missing_type(client, register_routes, computer, person):
    """DELETE of a to-one linkage object without 'type' is a 400."""
    body = {'data': {'id': str(person.person_id)}}
    with client:
        rv = client.delete(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_delete_relationship_not_list_missing_id(client, register_routes, computer, person):
    """DELETE of a to-one linkage object without 'id' is a 400."""
    body = {'data': {'type': 'person'}}
    with client:
        rv = client.delete(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_delete_relationship_not_list_wrong_type(client, register_routes, computer, person):
    """DELETE of a to-one linkage with the wrong 'type' is a 409 Conflict."""
    body = {'data': {'type': 'error', 'id': str(person.person_id)}}
    with client:
        rv = client.delete(
            '/computers/{}/relationships/owner'.format(computer.id),
            data=json.dumps(body),
            content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_delete_relationship_missing_type(client, register_routes, computer, person):
    """DELETE of a to-many linkage entry without 'type' is a 400."""
    body = {'data': [{'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.delete(url, data=json.dumps(body),
                           content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_delete_relationship_missing_id(client, register_routes, computer, person):
    """DELETE of a to-many linkage entry without 'id' is a 400."""
    body = {'data': [{'type': 'computer'}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.delete(url, data=json.dumps(body),
                           content_type='application/vnd.api+json')
    assert rv.status_code == 400


def test_delete_relationship_wrong_type(client, register_routes, computer, person):
    """DELETE of a to-many linkage with the wrong 'type' is a 409 Conflict."""
    body = {'data': [{'type': 'error', 'id': str(computer.id)}]}
    url = '/persons/{}/relationships/computers?include=computers'.format(
        person.person_id)
    with client:
        rv = client.delete(url, data=json.dumps(body),
                           content_type='application/vnd.api+json')
    assert rv.status_code == 409
def test_base_data_layer():
    """Every hook on the abstract BaseDataLayer raises NotImplementedError."""
    layer = BaseDataLayer(dict())
    # (method name, positional args) for every abstract hook; exercised in a
    # table-driven loop instead of 28 hand-written pytest.raises blocks.
    hooks = [
        ('create_object', (None, dict())),
        ('get_object', (dict(),)),
        ('get_collection', (None, dict())),
        ('update_object', (None, None, dict())),
        ('delete_object', (None, dict())),
        ('create_relationship', (None, None, None, dict())),
        ('get_relationship', (None, None, None, dict())),
        ('update_relationship', (None, None, None, dict())),
        ('delete_relationship', (None, None, None, dict())),
        ('query', (dict(),)),
        ('before_create_object', (None, dict())),
        ('after_create_object', (None, None, dict())),
        ('before_get_object', (dict(),)),
        ('after_get_object', (None, dict())),
        ('before_get_collection', (None, dict())),
        ('after_get_collection', (None, None, dict())),
        ('before_update_object', (None, None, dict())),
        ('after_update_object', (None, None, dict())),
        ('before_delete_object', (None, dict())),
        ('after_delete_object', (None, dict())),
        ('before_create_relationship', (None, None, None, dict())),
        ('after_create_relationship', (None, None, None, None, None, dict())),
        ('before_get_relationship', (None, None, None, dict())),
        ('after_get_relationship', (None, None, None, None, None, dict())),
        ('before_update_relationship', (None, None, None, dict())),
        ('after_update_relationship', (None, None, None, None, None, dict())),
        ('before_delete_relationship', (None, None, None, dict())),
        ('after_delete_relationship', (None, None, None, None, None, dict())),
    ]
    for name, args in hooks:
        with pytest.raises(NotImplementedError):
            getattr(layer, name)(*args)
def test_qs_manager():
    """QSManager rejects a non-dict querystring argument with ValueError."""
    with pytest.raises(ValueError):
        QSManager([], None)
def test_api(app, person_list):
    """Bind the app at Api() construction time, then register and init."""
    api = Api(app)
    api.route(person_list, 'person_list', '/persons', '/person_list')
    api.init_app()
def test_api_resources(app, person_list):
    """Register routes on a bare Api, then bind the app via init_app(app)."""
    api = Api()
    api.route(person_list, 'person_list2', '/persons', '/person_list')
    api.init_app(app)
def test_relationship_containing_hyphens(client, register_routes, person_computers, computer_schema, person):
    """A relationship whose route name contains hyphens is reachable."""
    url = '/persons/{}/relationships/computers-owned'.format(person.person_id)
    rv = client.get(url, content_type='application/vnd.api+json')
    assert rv.status_code == 200
| 35.882353 | 136 | 0.592289 |
79581889879251db51949ffe010f5cc2de42e9e7 | 170 | py | Python | mayan/apps/ocr/backends/literals.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 1 | 2021-06-17T18:24:25.000Z | 2021-06-17T18:24:25.000Z | mayan/apps/ocr/backends/literals.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 7 | 2020-06-06T00:01:04.000Z | 2022-01-13T01:47:17.000Z | mayan/apps/ocr/backends/literals.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, unicode_literals
# Filesystem path of the tesseract OCR executable used by this backend.
DEFAULT_TESSERACT_BINARY_PATH = '/usr/bin/tesseract'
# Maximum time to wait for a tesseract run before giving up.
DEFAULT_TESSERACT_TIMEOUT = 600 # 600 seconds, 10 minutes
| 34 | 58 | 0.835294 |
7958188b60f8413be5bae75a44a20f1e9cd91774 | 10,551 | py | Python | openstack_dashboard/api/lbaas.py | rackerlabs/horizon | 8914ed95fc8fa44771f5f3ec827e325a5855b60a | [
"Apache-2.0"
] | 2 | 2018-10-21T22:30:29.000Z | 2020-11-21T08:58:31.000Z | openstack_dashboard/api/lbaas.py | e/horizon | abbce256b68178ebf42816eb87303292212c1dfe | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/api/lbaas.py | e/horizon | abbce256b68178ebf42816eb87303292212c1dfe | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from openstack_dashboard.api.neutron import NeutronAPIDictWrapper
from openstack_dashboard.api.neutron import neutronclient
from openstack_dashboard.api.neutron import subnet_get
class Vip(NeutronAPIDictWrapper):
    """Dict-like wrapper around a neutron load balancer vip resource."""
    def __init__(self, apiresource):
        # Delegate storage of the raw API dict to NeutronAPIDictWrapper.
        super(Vip, self).__init__(apiresource)
class Pool(NeutronAPIDictWrapper):
    """Dict-like wrapper around a neutron load balancer pool resource.

    Adds ``readable`` which resolves subnet/vip ids to display values,
    falling back to the raw ids when the lookups fail.
    """
    def __init__(self, apiresource):
        super(Pool, self).__init__(apiresource)
    class AttributeDict(dict):
        """Dict whose keys can also be read and written as attributes."""
        def __getattr__(self, attr):
            try:
                return self[attr]
            except KeyError:
                # Raise AttributeError (not KeyError) so hasattr() and
                # getattr(obj, name, default) behave correctly.
                raise AttributeError(attr)
        def __setattr__(self, attr, value):
            self[attr] = value
    def readable(self, request):
        """Return an AttributeDict of display-friendly pool fields.

        Subnet and vip ids are resolved through the neutron API; any
        lookup failure falls back to showing the raw id.
        """
        pFormatted = {'id': self.id,
                      'name': self.name,
                      'description': self.description,
                      'protocol': self.protocol,
                      'health_monitors': self.health_monitors}
        try:
            pFormatted['subnet_id'] = self.subnet_id
            pFormatted['subnet_name'] = subnet_get(
                request, self.subnet_id).cidr
        except Exception:
            # A bare except here would also swallow SystemExit and
            # KeyboardInterrupt; only API/lookup errors should fall back.
            pFormatted['subnet_id'] = self.subnet_id
            pFormatted['subnet_name'] = self.subnet_id
        if self.vip_id is not None:
            try:
                pFormatted['vip_id'] = self.vip_id
                pFormatted['vip_name'] = vip_get(
                    request, self.vip_id).name
            except Exception:
                pFormatted['vip_id'] = self.vip_id
                pFormatted['vip_name'] = self.vip_id
        else:
            pFormatted['vip_id'] = None
            pFormatted['vip_name'] = None
        return self.AttributeDict(pFormatted)
class Member(NeutronAPIDictWrapper):
    """Dict-like wrapper around a neutron load balancer member resource.

    Adds ``readable`` which resolves the owning pool id to its name,
    falling back to the raw id when the lookup fails.
    """
    def __init__(self, apiresource):
        super(Member, self).__init__(apiresource)
    class AttributeDict(dict):
        """Dict whose keys can also be read and written as attributes."""
        def __getattr__(self, attr):
            try:
                return self[attr]
            except KeyError:
                # Raise AttributeError (not KeyError) so hasattr() and
                # getattr(obj, name, default) behave correctly.
                raise AttributeError(attr)
        def __setattr__(self, attr, value):
            self[attr] = value
    def readable(self, request):
        """Return an AttributeDict of display-friendly member fields."""
        mFormatted = {'id': self.id,
                      'address': self.address,
                      'protocol_port': self.protocol_port}
        try:
            mFormatted['pool_id'] = self.pool_id
            mFormatted['pool_name'] = pool_get(
                request, self.pool_id).name
        except Exception:
            # A bare except here would also swallow SystemExit and
            # KeyboardInterrupt; only API/lookup errors should fall back.
            mFormatted['pool_id'] = self.pool_id
            mFormatted['pool_name'] = self.pool_id
        return self.AttributeDict(mFormatted)
class PoolStats(NeutronAPIDictWrapper):
    """Dict-like wrapper around neutron load balancer pool statistics."""
    def __init__(self, apiresource):
        # Delegate storage of the raw API dict to NeutronAPIDictWrapper.
        super(PoolStats, self).__init__(apiresource)
class PoolMonitor(NeutronAPIDictWrapper):
    """Dict-like wrapper around a neutron pool health monitor resource."""
    def __init__(self, apiresource):
        # Delegate storage of the raw API dict to NeutronAPIDictWrapper.
        super(PoolMonitor, self).__init__(apiresource)
def vip_create(request, **kwargs):
    """Create a vip for a specified pool.

    :param request: request context
    :param address: virtual IP address
    :param name: name for vip
    :param description: description for vip
    :param subnet_id: subnet_id for subnet of vip
    :param protocol_port: transport layer port number for vip
    :returns: Vip object
    """
    # Pull exactly the attributes the API expects out of kwargs; a missing
    # key raises KeyError just as the explicit dict literal did.
    vip_attrs = ('address', 'name', 'description', 'subnet_id',
                 'protocol_port', 'protocol', 'pool_id',
                 'session_persistence', 'connection_limit',
                 'admin_state_up')
    body = {'vip': dict((attr, kwargs[attr]) for attr in vip_attrs)}
    return Vip(neutronclient(request).create_vip(body).get('vip'))
def vips_get(request, **kwargs):
    """Return every load balancer vip as a list of Vip wrappers."""
    return [Vip(v) for v in neutronclient(request).list_vips().get('vips')]
def vip_get(request, vip_id):
    """Fetch a single vip by id."""
    return Vip(neutronclient(request).show_vip(vip_id).get('vip'))
def vip_update(request, vip_id, **kwargs):
    """Update a vip with the given attributes; return the new state."""
    updated = neutronclient(request).update_vip(vip_id, kwargs).get('vip')
    return Vip(updated)
def vip_delete(request, vip_id):
    """Delete the vip with the given id."""
    neutronclient(request).delete_vip(vip_id)
def pool_create(request, **kwargs):
    """Create a pool for specified protocol

    :param request: request context
    :param name: name for pool
    :param description: description for pool
    :param subnet_id: subnet_id for subnet of pool
    :param protocol: load balanced protocol
    :param lb_method: load balancer method
    :param admin_state_up: admin state (default on)
    """
    # Pull exactly the attributes the API expects out of kwargs; a missing
    # key raises KeyError just as the explicit dict literal did.
    pool_attrs = ('name', 'description', 'subnet_id', 'protocol',
                  'lb_method', 'admin_state_up')
    body = {'pool': dict((attr, kwargs[attr]) for attr in pool_attrs)}
    return Pool(neutronclient(request).create_pool(body).get('pool'))
def pools_get(request, **kwargs):
    """Return every load balancer pool as a list of Pool wrappers."""
    return [Pool(p) for p in neutronclient(request).list_pools().get('pools')]
def pool_get(request, pool_id):
    """Fetch a single pool by id."""
    return Pool(neutronclient(request).show_pool(pool_id).get('pool'))
def pool_update(request, pool_id, **kwargs):
    """Update a pool with the given attributes; return the new state."""
    updated = neutronclient(request).update_pool(pool_id, kwargs).get('pool')
    return Pool(updated)
def pool_delete(request, pool):
    """Delete the pool with the given id."""
    neutronclient(request).delete_pool(pool)
# not linked to UI yet
def pool_stats(request, pool_id, **kwargs):
    """Retrieve traffic statistics for the given pool as a PoolStats."""
    stats = neutronclient(request).retrieve_pool_stats(pool_id, **kwargs)
    return PoolStats(stats)
def pool_health_monitor_create(request, **kwargs):
    """Create a health monitor

    :param request: request context
    :param type: type of monitor
    :param delay: delay of monitor
    :param timeout: timeout of monitor
    :param max_retries: max retries [1..10]
    :param http_method: http method
    :param url_path: url path
    :param expected_codes: http return code
    :param admin_state_up: admin state
    """
    monitor_type = kwargs['type'].upper()
    monitor = {'type': monitor_type,
               'delay': kwargs['delay'],
               'timeout': kwargs['timeout'],
               'max_retries': kwargs['max_retries'],
               'admin_state_up': kwargs['admin_state_up']}
    # HTTP-level probe settings only apply to HTTP/HTTPS monitors.
    if monitor_type in ('HTTP', 'HTTPS'):
        for attr in ('http_method', 'url_path', 'expected_codes'):
            monitor[attr] = kwargs[attr]
    body = {'health_monitor': monitor}
    mon = neutronclient(request).create_health_monitor(body).get(
        'health_monitor')
    return PoolMonitor(mon)
def pool_health_monitors_get(request, **kwargs):
    """Return every health monitor as a list of PoolMonitor wrappers."""
    monitors = neutronclient(request).list_health_monitors().get(
        'health_monitors')
    return [PoolMonitor(m) for m in monitors]
def pool_health_monitor_get(request, monitor_id):
    """Fetch a single health monitor by id."""
    monitor = neutronclient(request).show_health_monitor(
        monitor_id).get('health_monitor')
    return PoolMonitor(monitor)
def pool_health_monitor_update(request, monitor_id, **kwargs):
    """Update a health monitor; return the new state as a PoolMonitor.

    Bug fix: unwrap the 'health_monitor' key from the API response,
    matching pool_health_monitor_get(); previously the whole response
    envelope was wrapped, so attribute access on the result was broken.
    """
    monitor = neutronclient(request).update_health_monitor(
        monitor_id, kwargs).get('health_monitor')
    return PoolMonitor(monitor)
def pool_health_monitor_delete(request, mon_id):
    """Delete the health monitor with the given id."""
    neutronclient(request).delete_health_monitor(mon_id)
def member_create(request, **kwargs):
    """Create a load balance member

    :param request: request context
    :param pool_id: pool_id of pool for member
    :param address: IP address
    :param protocol_port: transport layer port number
    :param weight: weight for member
    :param admin_state_up: admin_state
    """
    # Pull exactly the attributes the API expects out of kwargs; a missing
    # key raises KeyError just as the explicit dict literal did.
    member_attrs = ('pool_id', 'address', 'protocol_port', 'weight',
                    'admin_state_up')
    body = {'member': dict((attr, kwargs[attr]) for attr in member_attrs)}
    return Member(neutronclient(request).create_member(body).get('member'))
def members_get(request, **kwargs):
    """Return every load balancer member as a list of Member wrappers."""
    members = neutronclient(request).list_members().get('members')
    return [Member(m) for m in members]
def member_get(request, member_id):
    """Fetch a single member by id."""
    member = neutronclient(request).show_member(member_id).get('member')
    return Member(member)
def member_update(request, member_id, **kwargs):
    """Update a member; return the new state as a Member.

    Bug fix: unwrap the 'member' key from the API response, matching
    member_get(); previously the whole response envelope was wrapped,
    so attribute access on the result was broken.
    """
    member = neutronclient(request).update_member(
        member_id, kwargs).get('member')
    return Member(member)
def member_delete(request, mem_id):
    """Delete the member with the given id."""
    neutronclient(request).delete_member(mem_id)
def pool_monitor_association_create(request, **kwargs):
"""Associate a health monitor with pool
:param request: request context
:param monitor_id: id of monitor
:param pool_id: id of pool
"""
body = {'health_monitor': {'id': kwargs['monitor_id'], }}
neutronclient(request).associate_health_monitor(
kwargs['pool_id'], body)
def pool_monitor_association_delete(request, **kwargs):
    """Disassociate a health monitor from pool

    :param request: request context
    :param monitor_id: id of monitor
    :param pool_id: id of pool
    """
    client = neutronclient(request)
    client.disassociate_health_monitor(kwargs['pool_id'],
                                       kwargs['monitor_id'])
| 32.767081 | 78 | 0.635106 |
79581892ed1662a57529a88ee3197cf6296762b6 | 32,755 | py | Python | flax/core/lift.py | jacobhepkema/flax | 074ec83e453df92b4b68e7332e7f3511270e007b | [
"Apache-2.0"
] | null | null | null | flax/core/lift.py | jacobhepkema/flax | 074ec83e453df92b4b68e7332e7f3511270e007b | [
"Apache-2.0"
] | null | null | null | flax/core/lift.py | jacobhepkema/flax | 074ec83e453df92b4b68e7332e7f3511270e007b | [
"Apache-2.0"
] | 1 | 2021-06-06T23:19:32.000Z | 2021-06-06T23:19:32.000Z | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jax transform lifting."""
import collections
from dataclasses import dataclass
import functools
import jax
from jax import random
from typing import Any, Callable, Sequence, Union, Iterable, Optional, Mapping, TypeVar, Generic
from .frozen_dict import freeze
from .frozen_dict import FrozenDict
from .frozen_dict import unfreeze
from .scope import Scope, DenyList, CollectionFilter, PRNGSequenceFilter, in_filter, union_filters, intersect_filters, subtract_filters, group_collections
from . import axes_scan
T = TypeVar('T')
def _dedup_scopes(scopes):
paths = []
# must preseve insertion order for duplication to work correctly
minimal_set = collections.OrderedDict((s, ()) for s in scopes)
for leaf in scopes:
scope = leaf.parent
max_parent = leaf
max_parent_path = ()
path = [leaf.name]
while scope is not None:
if scope in minimal_set:
max_parent = scope
max_parent_path = tuple(reversed(path))
path.append(scope.name)
scope = scope.parent
if max_parent is not leaf and leaf in minimal_set:
del minimal_set[leaf]
paths.append((max_parent, max_parent_path))
return tuple(minimal_set), tuple(paths)
def _dup_scopes(orig_scopes, scopes, paths):
mapping = dict(zip(orig_scopes, scopes))
scopes = []
for root, path in paths:
scope = mapping[root]
for name in path:
scope = scope.push(name, reuse=True)
scopes.append(scope)
return scopes
def _transpose(xs):
return tuple(zip(*xs))
def pack(fn: Callable[..., Any],
         in_variable_filters: Sequence[CollectionFilter],
         out_variable_filters: Sequence[CollectionFilter],
         rng_filters: Sequence[PRNGSequenceFilter],
         name=None) -> Callable[..., Any]:
  """Pack variables and rngs for functional transformations.

  The pack function is the building block for all other lifted transformations.

  Args:
    fn: the function to be transformed. It receives ``(scope_fn, repack_fn,
      variable_groups, rng_groups, *args)`` and must return a pair of the
      result and the repacked output variable groups.
    in_variable_filters: one collection filter per input variable group.
    out_variable_filters: one collection filter per output variable group.
    rng_filters: one PRNG-sequence filter per rng group.
    name: optional name inserted into the scope path for debugging.
  """
  @functools.wraps(fn)
  def wrapper(scope_tree: Scope, *args):
    # pylint: disable=protected-access
    scopes, treedef = jax.tree_flatten(scope_tree)
    scopes, paths = _dedup_scopes(scopes)

    # Group each scope's variables according to the input filters.
    variable_groups_xs = []
    for scope in scopes:
      scope._validate_trace_level()
      scope._populate_collections()
      variable_groups_xs.append(group_collections(
          scope._variables, in_variable_filters))
    variable_groups_xs_t = _transpose(variable_groups_xs)

    # Make sure that in-only variable collections are frozen
    for variable_group_xs in variable_groups_xs_t:
      for variable_group in variable_group_xs:
        for col_name, collection in variable_group.items():
          col_in_out = any(
              in_filter(col_filter, col_name)
              for col_filter in out_variable_filters)
          if not col_in_out:
            variable_group[col_name] = freeze(collection)
    # Resolve each lifted rng kind to a concrete PRNG key now, so the
    # transformed function receives plain keys.
    rng_groups_xs = []
    for scope in scopes:
      rng_groups = group_collections(scope.rngs, rng_filters)
      for rng_group in rng_groups:
        for kind in rng_group:
          rng_group[kind] = scope.make_rng(kind)
      rng_groups_xs.append(rng_groups)
    rng_groups_xs_t = _transpose(rng_groups_xs)

    inner_scopes = []
    def scope_fn(variable_groups_xs_t, rng_groups_xs_t):
      # Reconstruct fresh inner scopes from (possibly transformed) variable
      # and rng groups.  Any previously created inner scopes are invalidated
      # because they may hold stale tracers.
      nonlocal inner_scopes
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
      inner_scopes = []
      mutable = False
      for out_filter in out_variable_filters:
        mutable = union_filters(mutable, out_filter)
      # could be () in the edge case where no rngs or variable_groups are lifted
      # in this case fallback to ((),) * len(scopes) to make sure the zip has something
      # to iterate over for each scope.
      variable_groups_xs = _transpose(variable_groups_xs_t) or ((),) * len(scopes)
      rng_groups_xs = _transpose(rng_groups_xs_t) or ((),) * len(scopes)
      assert len(variable_groups_xs) == len(scopes)
      assert len(rng_groups_xs) == len(scopes)
      for variable_groups, rng_groups, scope in zip(variable_groups_xs, rng_groups_xs, scopes):
        variables = {}
        rngs = {}
        for variable_group in variable_groups:
          variables.update(variable_group)
        for rng_group in rng_groups:
          rngs.update(rng_group)
        # make sure variable dicts are cloned and can't be manipulated by ref sharing.
        variables = jax.tree_map(lambda x: x, variables)
        scope_mutable = intersect_filters(scope.root.mutable, mutable)
        new_path = scope.path
        if name:
          if new_path:
            new_path = new_path[:-1] + (f'{name}({new_path[-1]})',)
          else:
            new_path = (f'{name}()',)
        inner_scope = Scope(
            variables, name=scope.name, rngs=rngs,
            mutable=scope_mutable, parent=None,
            path=new_path)
        inner_scopes.append(inner_scope)
      inner_scopes = _dup_scopes(scopes, inner_scopes, paths)
      return treedef.unflatten(inner_scopes)

    def repack(inner_scope_tree):
      # Collect the mutated variables from the inner scopes back into groups
      # matching out_variable_filters.
      inner_scopes = treedef.flatten_up_to(inner_scope_tree)
      inner_scopes, inner_paths = _dedup_scopes(inner_scopes)
      inner_scopes = list(inner_scopes)
      assert [p for _, p in paths] == [p for _, p in inner_paths]
      out_variable_groups_xs = []
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
        inner_scope._validate_trace_level()
        mutable_variables = {key: val for key, val
                             in inner_scope._variables.items()
                             if in_filter(inner_scope.mutable, key)}
        out_variable_groups = group_collections(
            mutable_variables, tuple(out_variable_filters) + (True,))
        # The trailing (True,) group catches anything not claimed by an
        # output filter; such leftovers are an error.
        remainder = tuple(out_variable_groups[-1].keys())
        if remainder:
          raise ValueError(f'unmapped output variables: {remainder}')
        out_variable_groups_xs.append(out_variable_groups[:-1])

      return _transpose(out_variable_groups_xs)

    try:
      y, out_variable_groups_xs_t = fn(
          scope_fn, repack,
          variable_groups_xs_t, rng_groups_xs_t,
          *args)
    finally:
      # Invalidate inner scopes even on error so stale tracers cannot leak.
      for inner_scope in inner_scopes:
        inner_scope.invalidate()
    # Write the transformed outputs back into the original (outer) scopes.
    out_variable_groups_xs = _transpose(out_variable_groups_xs_t)
    for scope, out_variable_groups in zip(scopes, out_variable_groups_xs):
      for out_variable_group in out_variable_groups:
        for col_name, collection in out_variable_group.items():
          for var_name, value in collection.items():
            scope.put_variable(col_name, var_name, value)
    return y
  return wrapper
id_fn = lambda x: x
def transform(
    fn: Callable[..., Any],
    target: CollectionFilter,
    trans_in_fn: Callable[..., Any] = id_fn,
    trans_out_fn: Callable[..., Any] = id_fn,
    init: bool = False, mutable: bool = False,
    rngs: PRNGSequenceFilter = True, variables: CollectionFilter = True):
  """Locally transform Variables inside a scope.

  Args:
    fn: the function to be transformed.
    target: the collection(s) to be transformed.
    trans_in_fn: creates a view of the target variables.
    trans_out_fn: transforms the updated variables in the view after mutation.
    init: If True, variables are initialized before transformation.
    mutable: If True, the target collections are returned as outputs.
    rngs: PRNGSequences added to the transformed scope (default: all).
    variables: Additional Variable collections added to the transformed scope.
      Besides those specified by `target` (default: all).
  """
  def wrapper(scope_fn, repack, variable_groups, rng_groups, treedef, *args):
    target, variables = variable_groups
    if init:
      # Run fn once untransformed to initialize the target collections,
      # then apply trans_out_fn so the stored variables are in "view" form.
      scope = scope_fn((target, variables), rng_groups)
      fn(scope, *args)
      target, _ = repack(scope)
      target_tree = trans_out_fn(treedef.unflatten(target))
      target = treedef.flatten_up_to(target_tree)
    # Build the (possibly transformed) view of the target variables that fn
    # will actually see.
    target_tree = treedef.unflatten(map(unfreeze, target))
    target_tree = trans_in_fn(target_tree)
    target = treedef.flatten_up_to(target_tree)
    if not is_target_out:
      # Target is input-only: freeze it so fn cannot mutate it.
      target = tuple(map(freeze, target))
    scope = scope_fn((target, variables), rng_groups)
    y = fn(scope, *args)
    out_target, out_vars = repack(scope)
    if is_target_out:
      # Map mutated view variables back to storage form before writing out.
      out_target_tree = trans_out_fn(treedef.unflatten(out_target))
      out_target = treedef.flatten_up_to(out_target_tree)
    return y, (out_target, out_vars)
  is_target_out = mutable or init
  in_vars = (target, variables)
  out_vars = in_vars if is_target_out else (False, subtract_filters(variables, target))
  wrapper = pack(wrapper, in_vars, out_vars, (rngs,), name='transform')
  @functools.wraps(wrapper)
  def catch_treedef(scopes, *args):
    # Capture the scope pytree structure before pack flattens it; wrapper
    # needs it to un/flatten the target groups consistently.
    treedef = jax.tree_structure(scopes)
    return wrapper(scopes, treedef, *args)
  return catch_treedef
def transform_module(fn: Callable[..., Any],
                     target: CollectionFilter = 'params',
                     trans_in_fn: Callable[..., Any] = id_fn,
                     trans_out_fn: Callable[..., Any] = id_fn,
                     mutable: bool = False,
                     rngs: PRNGSequenceFilter = True,
                     variables: CollectionFilter = True):
  """"Wrapper around `transform` for automatic init detection.

  This function will detect if the target collection exists.
  If it doesn't ``init=True`` will be passed to `transform`.

  See `transform` for more details.
  """
  def wrapper(scope, *args, **kwargs):
    # The target collection being absent (or empty) means this is the
    # initialization call.
    all_vars = scope.variables()
    needs_init = target not in all_vars or not all_vars[target]
    bound_fn = functools.partial(fn, **kwargs)
    return transform(
        bound_fn,
        target,
        trans_in_fn=trans_in_fn,
        trans_out_fn=trans_out_fn,
        init=needs_init, mutable=mutable,
        rngs=rngs, variables=variables)(scope, *args)
  return wrapper
def swap_collection(fn: Callable[..., Any], col_a: str, col_b: str):
  """Swap two collections."""
  def _exchange(target):
    # Missing collections behave like empty dicts.
    first = target[col_a] if col_a in target else {}
    second = target[col_b] if col_b in target else {}
    target[col_a], target[col_b] = second, first
    return target
  return transform(fn, (col_a, col_b), _exchange, _exchange, mutable=True)
@dataclass(frozen=True)
class In(Generic[T]):
  """Specifies a variable collection should only be lifted as input."""
  # Per-transform value (e.g. the vmap/scan axis) for the input-only group.
  axis: Any  # pytype does not support generic variable annotation
@dataclass(frozen=True)
class Out(Generic[T]):
  """Specifies a variable collection should only be lifted as output."""
  # Per-transform value (e.g. the vmap/scan axis) for the output-only group.
  axis: Any  # pytype does not support generic variable annotation
def _split_in_out_axes(xs: Mapping[CollectionFilter, Any]):
  """Partition a filter->axis mapping into input axes and output axes.

  ``In``-wrapped entries appear only in the input mapping, ``Out``-wrapped
  entries only in the output mapping, plain entries in both.
  """
  def _axis(v):
    return v.axis if isinstance(v, (In, Out)) else v
  in_axes = {k: _axis(v) for k, v in xs.items() if not isinstance(v, Out)}
  out_axes = {k: _axis(v) for k, v in xs.items() if not isinstance(v, In)}
  return in_axes, out_axes
Axis = Optional[int]
InOutAxis = Union[Axis, In[Axis], Out[Axis]]
def vmap(fn: Callable[..., Any],
         variable_axes: Mapping[CollectionFilter, InOutAxis],
         split_rngs: Mapping[PRNGSequenceFilter, bool],
         in_axes=0, out_axes=0,
         axis_size: Optional[int] = None,
         axis_name: Optional[str] = None) -> Callable[..., Any]:
  """A lifted version of ``jax.vmap``.

  See ``jax.vmap`` for the unlifted batch transform in Jax.

  ``vmap`` can be used to add a batch axis to a scope function.
  For example we could create a version of ``dense`` with
  a batch axis that does not share parameters::

    batch_dense = lift.vmap(
        nn.dense,
        in_axes=(0, None),
        variable_axes={'params': 0},
        split_rngs={'params': True})

  By using ``variable_axes={'params': 0}``, we indicate that the
  parameters themselves are mapped over and therefore not shared along
  the mapped axis. Consequently, we also split the 'params' RNG,
  otherwise the parameters would be initialized identically along
  the mapped axis.

  Similarly, ``vmap`` could be use to add a batch axis with parameter
  sharing::

    batch_foo = lift.vmap(
        foo,
        in_axes=0, out_axes=0,
        variable_axes={'params': None},
        split_rngs={'params': False})

  Here we use ``variable_axes={'params': None}`` to indicate the parameter
  variables are shared along the mapped axis. Consequently, the 'params'
  RNG must also be shared.

  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are lifted into the
      batching transformation. Use `None` to indicate a broadcasted
      collection or an integer to map over an axis.
    split_rngs: Split PRNG sequences will be different for each index
      of the batch dimension. Unsplit PRNGs will be broadcasted.
    in_axes: Specifies the mapping of the input arguments (see `jax.vmap`).
    out_axes: Specifies the mapping of the return value (see `jax.vmap`).
    axis_size: Specifies the size of the batch axis. This only needs
      to be specified if it cannot be derived from the input arguments.
    axis_name: Specifies a name for the batch axis. Can be used together
      with parallel reduction primitives (e.g. `jax.lax.pmean`,
      `jax.lax.ppermute`, etc.)
  """
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  # Split rngs get axis 0; shared rngs are broadcast (axis None).
  rng_axes = tuple(0 if rng_split else None for rng_split in rng_splits)

  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    def find_axis_size(axis, x):
      # Returns () (an empty pytree) when the size cannot be derived here.
      if axis is not None:
        leaves = jax.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      return ()

    # Derive the batch size from any mapped variable or argument; all
    # derived sizes must agree.
    axis_sizes = jax.tree_multimap(find_axis_size, (variable_in_axes, in_axes), (variable_groups, args))
    axis_sizes = set(jax.tree_leaves(axis_sizes))
    if axis_size is None and len(axis_sizes) == 1:
      d_axis_size, = axis_sizes
    elif len(axis_sizes) > 1:
      raise ValueError(f'Inconsistent batch axis sizes: {axis_sizes}')
    elif axis_size is None:
      raise ValueError('axis_size should be specified manually.')
    else:
      d_axis_size = axis_size
    # split rngs
    split_fn = lambda rng: random.split(rng, d_axis_size)
    rng_groups = tuple(
        jax.tree_map(split_fn, rng_group) if split else rng_group
        for rng_group, split in zip(rng_groups, rng_splits))

    @functools.partial(jax.vmap,
                       in_axes=(variable_in_axes, rng_axes, in_axes),
                       out_axes=(out_axes, variable_out_axes),
                       axis_name=axis_name)
    @functools.wraps(fn)
    def mapped(variable_groups, rng_groups, args):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)

    return mapped(variable_groups, rng_groups, args)

  return pack(
      inner, variable_in_groups, variable_out_groups, rng_groups,
      name='vmap')
ScanAxis = int
InOutScanAxis = Union[ScanAxis, In[ScanAxis], Out[ScanAxis]]
def scan(fn: Callable[..., Any],
         variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {},
         variable_broadcast: CollectionFilter = False,
         variable_carry: CollectionFilter = False,
         split_rngs: Mapping[PRNGSequenceFilter, bool] = {},
         in_axes=0, out_axes=0,
         length: Optional[int] = None,
         reverse: bool = False) -> Callable[..., Any]:
  """A lifted version of ``jax.lax.scan``.

  See ``jax.lax.scan`` for the unlifted scan in Jax.

  To improve consistency with ``vmap``, this version of scan
  uses ``in_axes`` and ``out_axes`` to determine which arguments
  are scanned over and along which axis.

  ``scan`` distinguishes between 3 different types of values inside the loop:

  1. **scan**: a value that is iterated over in a loop. All scan values must
    have the same size in the axis they are scanned over. Scanned outputs
    will be stacked along the scan axis.
  2. **carry**: A carried value is updated at each loop iteration. It must
    have the same shape and dtype throughout the loop.
  3. **broadcast**: a value that is closed over by the loop. When a variable
    is broadcasted they are typically initialized inside the loop body but
    independent of the loop variables.

  The loop body should have the signature
  ``(scope, body, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys``
  are the scan values that go in and out of the loop.

  Example::

    scope.variable('counter', 'i', jnp.zeros, ())
    def body_fn(scope, c, x):
      counter = scope.variable('counter', 'i', jnp.zeros, ())
      counter.value += 1
      x = scope.child(nn.dense)(x, 1)
      return c, x

    _, ys = lift.scan(
        body_fn,
        variable_carry='counter',
        variable_broadcast='params',
        split_rngs={'params': False})(scope, (), xs)

  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are scanned over.
    variable_broadcast: Specifies the broadcasted variable collections.
      A broadcasted variable should not depend on any computation that cannot be lifted out of the loop.
      This is typically used to define shared parameters inside the fn.
    variable_carry: Specifies the variable collections that are carried through the loop.
      Mutations to these variables are carried to the next iteration and will be preserved
      when the scan finishes.
    split_rngs: Split PRNG sequences will be different for each loop iterations.
      If split is False the PRNGs will be the same across iterations.
    in_axes: Specifies the axis to scan over for the arguments. Should be a prefix
      tree of the arguments. Use `flax.core.broadcast` to feed an entire input
      to each iteration of the scan body.
    out_axes: Specifies the axis to scan over for the return value. Should be a prefix
      tree of the return value.
    length: Specifies the number of loop iterations. This only needs
      to be specified if it cannot be derivied from the scan arguments.
    reverse: If true, scan from end to start in reverse order.
  Returns:
    The scan function with the signature ``(scope, carry, *xxs) -> (carry, yys)``,
    where ``xxs`` and ``yys`` are the scan values that go in and out of the loop.
  """
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  # Scanned variable collections require a concrete integer axis.
  assert all(isinstance(ax, int) for ax in variable_in_axes)
  assert all(isinstance(ax, int) for ax in variable_out_axes)
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  rng_axes = tuple(0 if rng_split else axes_scan.broadcast
                   for rng_split in rng_splits)

  def inner(scope_fn, repack_fn,
            variable_groups, rng_groups,
            init, *args):
    def find_length(axis, x):
      # Returns () (an empty pytree) when the length cannot be derived here.
      if axis is not axes_scan.broadcast:
        leaves = jax.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      return ()
    # Derive the loop length from the scanned arguments; all derived
    # lengths must agree.
    lengths = jax.tree_multimap(find_length, in_axes, args)
    lengths = set(jax.tree_leaves(lengths))
    if length is None and len(lengths) == 1:
      d_length, = lengths
    elif len(lengths) > 1:
      raise ValueError(f'Inconsistent scan lengths: {lengths}')
    elif length is None:
      raise ValueError('length should be specified manually.')
    else:
      d_length = length
    # split rngs
    split_fn = lambda rng: random.split(rng, d_length)
    rng_groups = tuple(
        jax.tree_map(split_fn, rng_group) if split else rng_group
        for rng_group, split in zip(rng_groups, rng_splits))

    @functools.partial(axes_scan.scan,
                       in_axes=(variable_in_axes, rng_axes, in_axes),
                       out_axes=(out_axes, variable_out_axes),
                       length=length, reverse=reverse)
    def scanned(broadcast_vars, carry, scan_variable_groups, rng_groups, args):
      carry_vars, c = carry
      variable_groups = (broadcast_vars, carry_vars) + scan_variable_groups
      scope = scope_fn(variable_groups, rng_groups)
      c, y = fn(scope, c, *args)
      out_vars = repack_fn(scope)
      broadcast_vars_out = out_vars[0]
      carry_vars = out_vars[1]
      scan_vars = out_vars[2:]
      # add immutable broadcast vars back to broadcast output
      # otherwise they won't be fed to the actual scan body
      for in_group, out_group in zip(broadcast_vars, broadcast_vars_out):
        for col in in_group:
          if col not in out_group:
            out_group[col] = in_group[col]
      return broadcast_vars_out, (carry_vars, c), (y, scan_vars)

    broadcast_vars = variable_groups[0]
    carry_vars = variable_groups[1]
    scan_vars = variable_groups[2:]
    broadcast_vars, (carry_vars, c), (ys, scan_vars) = scanned(
        broadcast_vars, (carry_vars, init), scan_vars, rng_groups, args)
    # remove immutable broadcast vars otherwise they will be updated
    # with their own value which will cause an error
    for out_group in broadcast_vars:
      for name, col in tuple(out_group.items()):
        if isinstance(col, FrozenDict):
          del out_group[name]
    out_vars = (broadcast_vars, carry_vars,) + scan_vars
    return (c, ys), out_vars

  return pack(
      inner,
      (variable_broadcast, variable_carry) + variable_in_groups,
      (variable_broadcast, variable_carry) + variable_out_groups,
      rng_groups,
      name='scan')
def custom_vjp(fn: Callable[..., Any], backward_fn: Callable[..., Any],
               grad_kind: CollectionFilter = 'params',
               nondiff_argnums=()):
  """"Lifted version of `jax.custom_vjp`.

  `backward_fn` defines a custom vjp (backward gradient) for `fn`.

  Example::

    def fwd(scope, x, features):
      y = nn.dense(scope, x, features)
      return y, x

    def bwd(features, scope_fn, params, res, g):
      x = res
      fn = lambda params, x: nn.dense(scope_fn(params), x, features)
      _, pullback = jax.vjp(fn, params, x)
      g_param, g_x = pullback(g)
      g_param = jax.tree_map(jnp.sign, g_param)
      return g_param, g_x

    dense_sign_grad = lift.custom_vjp(fwd, backward_fn=bwd, nondiff_argnums=(2,))

  Args:
    fn: should return a tuple of output and auxiliary data for the backward pass.
    backward_fn: arguments are passed as (*nondiff_args, scope_fn, grad_variables, aux, g_y)
      where scope_fn takes grad_variables to create the scope,
      aux is the auxiliary data returned by `fn`,
      and g_y is the tangent of y.
    grad_kind: the collection(s) to compute the custom gradient for.
    nondiff_argnums: positional argument indices that are not differentiated.
  """
  # TODO(jheek) is this transform general/flexible enough?
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    grad_variables, other_variables = variable_groups

    def simple_scope_fn(grad_variables):
      # Freeze the grad variables: the backward pass only reads them.
      grad_variables = tuple(freeze(x) for x in grad_variables)
      return scope_fn((grad_variables, other_variables), rng_groups)

    def f(grad_variables, *args):
      scope = scope_fn((grad_variables, other_variables), rng_groups)
      y, _ = fn(scope, *args)
      vars_out = repack_fn(scope)
      return y, vars_out
    f = jax.custom_vjp(f, nondiff_argnums=nondiff_argnums)

    def f_fwd(grad_variables, *args):
      scope = simple_scope_fn(grad_variables)
      y, res = fn(scope, *args)
      vars_out = repack_fn(scope)
      # Residuals: fn's own aux data plus the grad variables themselves.
      return (y, vars_out), (res, grad_variables)

    def f_bwd(*args):
      # jax passes (*nondiff_args, residuals, cotangent).
      nondiff_args = args[:-2]
      res, g = args[-2:]
      g_y, _ = g
      user_res, grad_variables = res
      return backward_fn(*nondiff_args, simple_scope_fn, grad_variables, user_res, g_y)

    f.defvjp(f_fwd, f_bwd)
    return f(grad_variables, *args)

  variable_in_groups = (grad_kind, True,)
  variable_out_groups = (grad_kind, True,)
  rng_groups = (True,)
  return pack(
      inner, variable_in_groups, variable_out_groups, rng_groups,
      name='custom_vjp')
def checkpoint(fn: Callable[..., Any],
               variables: CollectionFilter = True,
               rngs: PRNGSequenceFilter = True,
               concrete: bool = False,
               ) -> Callable[..., Any]:
  """Lifted version of ``jax.checkpoint``.

  This function is aliased to ``lift.remat`` just like ``jax.remat``.

  Args:
    fn: scope function for which intermediate computations should be
      re-computed when computing gradients.
    variables: The variable collections that are lifted. By default all
      collections are lifted.
    rngs: The PRNG sequences that are lifted. By default all PRNG sequences
      are lifted.
    concrete: Optional, boolean indicating whether ``fun`` may involve
      value-dependent Python control flow (default False). Support for such
      control flow is optional, and disabled by default, because in some
      edge-case compositions with :func:`jax.jit` it can lead to some extra
      computation.
  Returns:
    A wrapped version of ``fn``. When computing gradients intermediate
    computations will be re-computed when computing gradients.
  """
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    # Variables and rngs are passed as explicit arguments so jax.remat can
    # trace through them.
    @functools.partial(jax.remat, concrete=concrete)
    @functools.wraps(fn)
    def rematted(variable_groups, rng_groups, *args):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)
    return rematted(variable_groups, rng_groups, *args)
  return pack(inner, (variables,), (variables,), (rngs,), name='remat')
def _hashable_filter(x):
"""Hashable version of CollectionFilter."""
if isinstance(x, Iterable):
return tuple(x) # convert un-hashable list & sets to tuple
if isinstance(x, DenyList):
return DenyList(_hashable_filter(x.deny)) # convert inner filter recursively
return x
def jit(fn: Callable[..., Any],
        variables: CollectionFilter = True,
        rngs: PRNGSequenceFilter = True,
        static_argnums: Union[int, Iterable[int]] = (),
        donate_argnums: Union[int, Iterable[int]] = (),
        device=None,
        backend: Union[str, None] = None,
        ) -> Callable[..., Any]:
  """Lifted version of ``jax.jit``.

  Args:
    fn: Scope function to be jitted.
    variables: The variable collections that are lifted. By default all
      collections are lifted.
    rngs: The PRNG sequences that are lifted. By default all PRNG sequences
      are lifted.
    static_argnums: An int or collection of ints specifying which positional
      arguments to treat as static (compile-time constant). Operations that only
      depend on static arguments will be constant-folded in Python (during
      tracing), and so the corresponding argument values can be any Python
      object. Static arguments should be hashable, meaning both ``__hash__`` and
      ``__eq__`` are implemented, and immutable. Calling the jitted function
      with different values for these constants will trigger recompilation. If
      the jitted function is called with fewer positional arguments than
      indicated by ``static_argnums`` then an error is raised. Arguments that
      are not arrays or containers thereof must be marked as static.
      Defaults to ().
    device: This is an experimental feature and the API is likely to change.
      Optional, the Device the jitted function will run on. (Available devices
      can be retrieved via :py:func:`jax.devices`.) The default is inherited from
      XLA's DeviceAssignment logic and is usually to use ``jax.devices()[0]``.
    backend: a string representing the XLA backend: ``'cpu'``, ``'gpu'``, or
      ``'tpu'``.
    donate_argnums: Specify which arguments are "donated" to the computation.
      It is safe to donate arguments if you no longer need them once the
      computation has finished. In some cases XLA can make use of donated
      buffers to reduce the amount of memory needed to perform a computation,
      for example recycling one of your input buffers to store a result. You
      should not reuse buffers that you donate to a computation, JAX will raise
      an error if you try to.
  Returns:
    A wrapped version of ``fn``, set up for just-in-time compilation.
  """
  if not isinstance(static_argnums, Iterable):
    static_argnums = (static_argnums,)
  if not isinstance(donate_argnums, Iterable):
    donate_argnums = (donate_argnums,)
  # offset argnums by two because first argument in the original function is the scope
  # while jitted has 3 functions before the user arguments.
  static_argnums = (0,) + tuple(i + 2 for i in static_argnums if i > 0)
  donate_argnums = tuple(i + 2 for i in donate_argnums if i > 0)

  # Close over scope_fn & repack_fn to avoid recompilation
  # this is impure but we use the fingerprint arg to differentiate between cases
  # where scope_fn or repack_fn actually produce non-identical results.
  scope_fn = None  # type: Callable
  repack_fn = None  # type: Callable
  @functools.partial(jax.jit,
                     static_argnums=static_argnums,
                     donate_argnums=donate_argnums,
                     device=device, backend=backend)
  @functools.wraps(fn)
  def jitted(fingerprint, variable_groups, rng_groups, *args):
    nonlocal scope_fn, repack_fn
    # fingerprint is only used to differentiate the cache signature for cases
    # where different collections are mutable.
    del fingerprint
    scope = scope_fn(variable_groups, rng_groups)  # pylint: disable=not-callable
    y = fn(scope, *args)
    return y, repack_fn(scope)  # pylint: disable=not-callable

  def inner(scope_fun, repack_fun, variable_groups, rng_groups, *args):
    nonlocal scope_fn, repack_fn
    try:
      scope_fn = scope_fun
      repack_fn = repack_fun
      # The mutability of all scopes acts as the static fingerprint.
      scopes = jax.tree_leaves(scope_fn(variable_groups, rng_groups))
      mutable = tuple(_hashable_filter(scope.mutable) for scope in scopes)
      return jitted(mutable, variable_groups, rng_groups, *args)
    finally:
      # Always reset the closed-over callables so stale ones cannot leak
      # into a later call.
      scope_fn, repack_fn = None, None

  return pack(inner, (variables,), (variables,), (rngs,), name='jit')
remat = checkpoint
def remat_scan(body_fn: Callable[..., Any], scope: Scope, carry: Any,
               lengths: Sequence[int],
               variable_broadcast: CollectionFilter = False,
               variable_carry: CollectionFilter = False,
               variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {},
               split_rngs: Mapping[PRNGSequenceFilter, bool] = {}):
  """Combines `lift.remat` and `lift.scan` for memory efficient scans.

  Example::

    def body_fn(scope, x):
      return nn.dense(scope, x, features=x.shape[-1])
    # 100x dense with O(sqrt(N)) memory for gradient computation
    y = lift.remat_scan(
        body_fn, scope, x, lengths=(10, 10),
        variable_axes={'params': 0},
        split_rngs={'params': True})
  """
  # TODO(jheek) should remat scan have scan inputs/outputs?
  scan_fn = functools.partial(
      scan,
      variable_broadcast=variable_broadcast,
      variable_carry=variable_carry,
      variable_axes=variable_axes,
      split_rngs=split_rngs)
  if len(lengths) == 1:
    # Base case: a single plain scan over body_fn (no scan outputs).
    def wrapper(scope, carry):
      return body_fn(scope, carry), ()
    carry, _ = scan_fn(wrapper, length=lengths[0])(scope, carry)
  else:
    # Recursive case: scan over a rematerialized inner loop; nesting the
    # remat keeps gradient memory sub-linear in the total iteration count.
    @remat
    def inner_loop(scope, carry):
      carry = remat_scan(body_fn, scope, carry, lengths[1:],
                         variable_broadcast, variable_carry,
                         variable_axes, split_rngs)
      return carry, ()
    carry, _ = scan_fn(inner_loop, length=lengths[0])(scope, carry)
  return carry
def named_call(fn: Callable[..., Any], name: str) -> Callable[..., Any]:
  """Adds a name scope to `fn` during profiling."""
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, args, kwargs):
    @functools.wraps(fn)
    def named(variable_groups, rng_groups):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args, **kwargs)
      return y, repack_fn(scope)
    # jax.named_call labels the traced computation for profilers.
    named = jax.named_call(named, name=name)
    return named(variable_groups, rng_groups)
  # All collections and rng sequences are lifted unchanged.
  lifted = pack(inner, (True,), (True,), (True,))
  def wrapper(scope, *args, **kwargs):
    return lifted(scope, args, kwargs)
  return wrapper
def _unzip2(xs):
ys = tuple(zip(*xs))
return ys if ys else ((), ())
| 39.654964 | 154 | 0.684354 |
79581961596d01acd2bab6e2ffe572d818efb0b2 | 6,771 | py | Python | libs/yowsup/yowsup/yowsup/layers/protocol_media/mediauploader.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | null | null | null | libs/yowsup/yowsup/yowsup/layers/protocol_media/mediauploader.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | null | null | null | libs/yowsup/yowsup/yowsup/layers/protocol_media/mediauploader.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | null | null | null | from yowsup.common.http.warequest import WARequest
from yowsup.common.http.waresponseparser import JSONResponseParser
import socket, ssl, os, hashlib, sys
from time import sleep
import threading
import logging
from yowsup.common.tools import MimeTools
import base64
import hmac
import binascii, requests
from Crypto.Cipher import AES
from axolotl.kdf.hkdfv3 import HKDFv3
from axolotl.sessioncipher import pad
from axolotl.util.byteutil import ByteUtil
from .protocolentities.message_media_downloadable import DownloadableMediaMessageProtocolEntity
logger = logging.getLogger(__name__)
class MediaUploader(WARequest, threading.Thread):
    """Encrypts a local media file and uploads it to a WhatsApp upload URL.

    Runs either synchronously or on its own thread, and reports the outcome
    through the supplied success/error callbacks.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so this
    class as written only parses on Python <= 3.6.
    """
    def __init__(self, jid, accountJid, sourcePath, uploadUrl, resumeOffset = 0, successClbk = None, errorClbk = None, progressCallback = None, async = True):
        """
        :param jid: recipient JID (used to derive the "to" ref token)
        :param accountJid: sender JID (used to derive the "from" ref token)
        :param sourcePath: path of the local media file to upload
        :param uploadUrl: server URL to POST the encrypted payload to
        :param resumeOffset: stored but not used by run() below
        :param successClbk: called as (sourcePath, jid, result_dict) on success
        :param errorClbk: called as (sourcePath, jid, uploadUrl) on failure
        :param progressCallback: stored but not used by run() below
        :param async: when True, start() spawns a worker thread
        """
        WARequest.__init__(self)
        self.async = async
        self.jid = jid
        self.accountJid = accountJid
        self.sourcePath = sourcePath
        self.uploadUrl = uploadUrl
        self.resumeOffset = resumeOffset
        self.successCallback = successClbk
        self.errorCallback = errorClbk
        self.progressCallback = progressCallback
        # Keys expected in the JSON upload response.
        self.pvars = ["name", "type", "size", "url", "error", "mimetype", "filehash", "width", "height"]
        self.setParser(JSONResponseParser())
        # NOTE(review): this socket is created but never used -- the raw
        # socket connect code in run() is commented out and requests handles
        # the transport instead.
        self.sock = socket.socket()
    def start(self):
        """Run the upload, threaded or inline depending on ``async``."""
        if self.async:
            threading.Thread.__init__(self)
            super(MediaUploader, self).start()
        else:
            self.run()
    def pad(self,s):
        """PKCS#7-pad the byte string *s* to a multiple of 16 bytes."""
        # return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)
        y = (16 - len(s) % 16) * chr(16 - len(s) % 16)
        a = s + y.encode()
        return a
    def getKey(self, filetype):
        """Return the per-media-type HKDF application info constant.

        :raises Exception: for MIME types outside video/image/audio/
            application/text.
        """
        if "video" in filetype:
            return DownloadableMediaMessageProtocolEntity.VIDEO_KEY
        elif "image" in filetype:
            return DownloadableMediaMessageProtocolEntity.IMAGE_KEY
        elif "audio" in filetype:
            return DownloadableMediaMessageProtocolEntity.AUDIO_KEY
        elif "application" in filetype:
            return DownloadableMediaMessageProtocolEntity.DOCUMENT_KEY
        elif "text" in filetype:
            return DownloadableMediaMessageProtocolEntity.DOCUMENT_KEY
        raise Exception ("FILE TYPE NOT SUPPORTED")
    def encryptMedia(self, img, refkey,filetype):
        """Encrypt *img* (bytes) with AES-CBC and append a truncated MAC.

        The hex media key *refkey* is expanded with HKDFv3 into an IV
        (16 bytes), a cipher key (32 bytes) and a MAC key (bytes 48:80).
        An HMAC-SHA256 is computed over IV + ciphertext and its first
        10 bytes are appended to the ciphertext.
        """
        key = self.getKey(filetype)
        derivative = HKDFv3().deriveSecrets(binascii.unhexlify(refkey),
            binascii.unhexlify(key), 112)
        parts = ByteUtil.split(derivative, 16, 32)
        iv = parts[0]
        cipherKey = parts[1]
        macKey = derivative[48:80]
        mac = hmac.new(macKey,digestmod=hashlib.sha256)
        mac.update(iv)
        cipher = AES.new(key=cipherKey, mode=AES.MODE_CBC, IV=iv)
        imgEnc = cipher.encrypt(self.pad(img))
        mac.update(imgEnc)
        # NOTE(review): ``hash`` is unused; hashKey recomputes the digest.
        hash = mac.digest()
        hashKey = ByteUtil.trim(mac.digest(), 10)
        finalEnc = imgEnc + hashKey
        return finalEnc
    def run(self):
        """Encrypt the source file and POST it as multipart form data.

        On success, calls successCallback(sourcePath, jid, result) where
        *result* is the parsed JSON response augmented with "mediaKey" and
        "file_enc_sha256"; on any failure, calls errorCallback.
        """
        sourcePath = self.sourcePath
        uploadUrl = self.uploadUrl
        try:
            filename = os.path.basename(sourcePath)
            filetype = MimeTools.getMIME(filename)
            f = open(sourcePath, 'rb')
            stream = f.read()
            f.close()
            # Random 32-byte media key, hex-encoded for the HKDF expansion.
            refkey = binascii.hexlify(os.urandom(32))
            stream = self.encryptMedia(stream,refkey,filetype)
            # NOTE(review): this ".enc" round-trip only measures the payload
            # size and the temp file is deleted immediately; len(stream)
            # below yields the same number.
            fenc = open(sourcePath+".enc", 'wb')
            fenc.write(stream)
            fenc.seek(0, 2)
            filesize=fenc.tell()
            fenc.close()
            os.remove(sourcePath + ".enc")
            filesize2 = len(stream)
            # SHA-256 of the encrypted payload (the name "sha1" is
            # historical; the algorithm used is SHA-256).
            sha1 = hashlib.sha256()
            sha1.update(stream)
            b64Hash = base64.b64encode(sha1.digest())
            file_enc_sha256 = hashlib.sha256(stream).hexdigest()
            #self.sock.connect((self.url, self.port))
            #ssl_sock = ssl.wrap_socket(self.sock)
            m = hashlib.md5()
            m.update(filename.encode())
            # NOTE(review): ``crypto`` is computed but never used below.
            crypto = m.hexdigest() + os.path.splitext(filename)[1]
            # "refs" tokens: HMAC-SHA256 (empty key) of the to/from JIDs,
            # truncated to 20 bytes and base64-encoded.
            digTo = hmac.new("".encode("utf-8"), self.jid.replace("@s.whatsapp.net", "@c.us").encode("utf-8"),
                hashlib.sha256).digest()[:20]
            refTo = base64.b64encode(digTo).decode()
            digFrom = hmac.new("".encode("utf-8"), self.accountJid.replace("@s.whatsapp.net", "@c.us").encode("utf-8"),
                hashlib.sha256).digest()[:20]
            refFrom = base64.b64encode(digFrom).decode()
            # Hand-built multipart/form-data body: hash, refs, then the file.
            hBAOS = "------zzXXzzYYzzXXzzQQ\r\n"
            hBAOS += "Content-Disposition: form-data; name=\"hash\"\r\n\r\n"
            hBAOS += b64Hash.decode() + "\r\n"
            hBAOS += "------zzXXzzYYzzXXzzQQ\r\n"
            hBAOS += "Content-Disposition: form-data; name=\"refs\"\r\n\r\n"
            hBAOS += refTo + "\r\n"
            hBAOS += refFrom + "\r\n"
            hBAOS += "------zzXXzzYYzzXXzzQQ\r\n"
            hBAOS += "Content-Disposition: form-data; name=\"file\"; filename=\"" + "blob" + "\"\r\n"
            hBAOS += "Content-Type: " + "application/octet-stream" + "\r\n\r\n"
            fBAOS = "\r\n------zzXXzzYYzzXXzzQQ--"
            contentLength = len(hBAOS) + len(fBAOS) + len(stream)
            headers = {
                "content-length": str(contentLength),
                "user-agent": self.getUserAgent(),
                "content-type": "multipart/form-data; boundary=----zzXXzzYYzzXXzzQQ"}
            data = bytearray(hBAOS, 'utf-8') + stream + bytearray(fBAOS, 'utf-8')
            response = requests.post(uploadUrl, data=data, headers=headers)
            #lines = data.decode().splitlines()
            result = None
            if response.text.startswith("{"):
                result = self.parser.parse(response.text, self.pvars)
            if not result:
                raise Exception("json data not found")
            if result["url"] is not None:
                if self.successCallback:
                    # self.successCallback(sourcePath, self.jid, result["url"])
                    result["mediaKey"]=refkey
                    result["file_enc_sha256"]=file_enc_sha256
                    self.successCallback(sourcePath, self.jid, result)
            else:
                logger.exception("uploadUrl: %s, result of uploading media has no url" % uploadUrl)
                if self.errorCallback:
                    self.errorCallback(sourcePath, self.jid, uploadUrl)
        except:
            logger.exception("Error occured at transfer %s"%sys.exc_info()[1])
            if self.errorCallback:
                self.errorCallback(sourcePath, self.jid, uploadUrl)
| 37.826816 | 158 | 0.586915 |
79581979f4a3c71436761d0cf992247ca677d3b9 | 2,110 | py | Python | packet_error_rate_helper.py | LoMesh-ORG/LoMeshMODBUS-PCUI | 2a034e167279eb79bcfe9b77d64cd64b8891b6ea | [
"Apache-2.0"
] | null | null | null | packet_error_rate_helper.py | LoMesh-ORG/LoMeshMODBUS-PCUI | 2a034e167279eb79bcfe9b77d64cd64b8891b6ea | [
"Apache-2.0"
] | null | null | null | packet_error_rate_helper.py | LoMesh-ORG/LoMeshMODBUS-PCUI | 2a034e167279eb79bcfe9b77d64cd64b8891b6ea | [
"Apache-2.0"
] | null | null | null | import stopit
import serial
import re
import time
def ping_test(target, timeoutval, ser):
    """Send a "ping" to *target* over the serial AT interface and wait for
    the matching ACK.

    :param target: node address interpolated into the AT+SEND command
    :param timeoutval: seconds to wait for the ACK before giving up
    :param ser: an open pyserial ``Serial`` handle
    :return: 0 if the ACK was observed within the timeout, -1 otherwise
    """
    ser.reset_input_buffer()
    ser.write(b'AT+SEND:' + target.encode('utf-8') + b'=ping\r\n')
    responcestr = ser.read_until().decode('utf-8')
    if("NOT OK" in responcestr):
        print("Error in sending message")
        return -1
    print("Sent message id", responcestr)
    # The modem response contains ":<msg_id>\r"; capture the id between the
    # colon and the carriage return.
    msg_id = re.search('%s(.*)%s' % (":", "\r"), responcestr).group(1)
    looking_for = b'ACK:' + msg_id.encode('utf-8')
    print("Looking for", looking_for)
    print("Sent message id ", int(msg_id, 10))
    return_code = -1
    #Now loop here till you hear back ACK for the message
    with stopit.ThreadingTimeout(timeoutval) as to_ctx_mgr:
        assert to_ctx_mgr.state == to_ctx_mgr.EXECUTING
        while(1):
            # Poll the receive buffer every 250 ms for the expected ACK line.
            ser.write(b'AT+RECV\r\n')
            responcestr = ser.read_until()
            print(responcestr, responcestr.find(looking_for))
            if(responcestr.find(looking_for) >= 0):
                #found the message id we were looking for
                print("Found ACK")
                return_code = 0
                # NOTE(review): the next line is a no-op comparison (== has
                # no side effect).  The author may have intended to mark or
                # cancel the timeout context here -- confirm against the
                # stopit API before changing.
                to_ctx_mgr.state == to_ctx_mgr.EXECUTED
                break
            time.sleep(0.25)
    # OK, let's check what happened
    if to_ctx_mgr.state == to_ctx_mgr.EXECUTED:
        # All's fine, everything was executed within 10 seconds
        print("Got ACK")
    elif to_ctx_mgr.state == to_ctx_mgr.EXECUTING:
        # Hmm, that's not possible outside the block
        pass
    elif to_ctx_mgr.state == to_ctx_mgr.TIMED_OUT:
        # Eeek the 10 seconds timeout occurred while executing the block
        print("Timed out")
    elif to_ctx_mgr.state == to_ctx_mgr.INTERRUPTED:
        # Oh you raised specifically the TimeoutException in the block
        pass
    elif to_ctx_mgr.state == to_ctx_mgr.CANCELED:
        # Oh you called to_ctx_mgr.cancel() method within the block but it
        # executed till the end
        pass
    else:
        # That's not possible
        pass
    return return_code
79581a27cc4e2af9040be44d960fbe23c9f1e094 | 1,700 | py | Python | drapache/util/subdomain_managers.py | louissobel/Drapache | 564aaba08ee6929043ccd68027c6b01920dbb40a | [
"MIT"
] | 9 | 2015-03-20T05:48:37.000Z | 2018-12-17T09:32:31.000Z | drapache/util/subdomain_managers.py | louissobel/Drapache | 564aaba08ee6929043ccd68027c6b01920dbb40a | [
"MIT"
] | null | null | null | drapache/util/subdomain_managers.py | louissobel/Drapache | 564aaba08ee6929043ccd68027c6b01920dbb40a | [
"MIT"
] | 2 | 2018-01-28T11:23:58.000Z | 2018-07-30T23:38:51.000Z | """
Module for handling users, the oauth tokens
THIS COULD AND SHOULD BE OPTIMIZED WITH CACHING!!!!
"""
import mysql_connect
class SubdomainException(Exception):
    """Raised when a subdomain's OAuth credentials cannot be looked up."""
class SubdomainManager:
    """
    Abstract interface for subdomain managers.

    Concrete implementations resolve a subdomain name to its stored OAuth
    credential pair.
    """

    def get_token(self, subdomain):
        """
        Return an (oauth_token, oauth_token_secret) tuple for *subdomain*,
        or None if the subdomain is unknown.

        Raises a SubdomainException when the lookup cannot be performed.
        """
        raise SubdomainException("get token not implemented")
class MysqlSubdomainManager(SubdomainManager):
    """Subdomain manager backed by a MySQL ``subdomains`` table."""

    def __init__(self, mysql_dict):
        # mysql_dict holds the connection parameters consumed by
        # mysql_connect.DBConnection.
        self.db_connection = mysql_connect.DBConnection(mysql_dict)

    def get_token(self, subdomain):
        """
        Return an (oauth_token, oauth_token_secret) tuple for the given
        subdomain, or None if no matching row exists.

        :raises SubdomainException: if the database query fails.
        """
        try:
            SUBDOMAIN_QUERY = "SELECT oauth_token,oauth_token_secret FROM subdomains WHERE subdomain=%s"
            result = self.db_connection.execute_query(SUBDOMAIN_QUERY, subdomain)
            result_list = list(result)
            if result_list:
                row = result_list[0]
                return (row['oauth_token'], row['oauth_token_secret'])
            return None
        except Exception as e:
            # Bug fix: ``Exception.message`` was removed in Python 3; use
            # str(e) instead, and chain the original cause for debugging.
            raise SubdomainException(str(e)) from e
class FlatFileSubdomainManager(SubdomainManager):
    """Subdomain manager backed by a pipe-delimited flat file."""

    def __init__(self, filename):
        """
        Reads the file into memory.
        Each non-empty line must be: subdomain|oauth_token|oauth_token_secret
        """
        self.subdomains_oauth_map = {}
        # Use a context manager so the handle is closed even if a line is
        # malformed, and skip blank lines (e.g. a trailing newline) which
        # previously raised a ValueError on unpacking.
        with open(filename) as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                subdomain, oauth_token, oauth_token_secret = line.split('|')
                self.subdomains_oauth_map[subdomain] = (oauth_token, oauth_token_secret)

    def get_token(self, subdomain):
        """Return (oauth_token, oauth_token_secret) or None if unknown."""
        return self.subdomains_oauth_map.get(subdomain)
| 26.5625 | 95 | 0.748824 |
79581b90cfeeb757a025b6fedb24aae908e1609e | 303 | py | Python | utils/channels/permissions.py | pablo-moreno/shitter-back | 33c0eb0e0bdff370b68148308ac08ab63b9e6e54 | [
"MIT"
] | null | null | null | utils/channels/permissions.py | pablo-moreno/shitter-back | 33c0eb0e0bdff370b68148308ac08ab63b9e6e54 | [
"MIT"
] | null | null | null | utils/channels/permissions.py | pablo-moreno/shitter-back | 33c0eb0e0bdff370b68148308ac08ab63b9e6e54 | [
"MIT"
] | null | null | null | class Permission(object):
    def __init__(self, scope):
        # scope: request scope object this permission checker is bound to.
        self.scope = scope
    def has_permission(self, *args, **kwargs):
        """Abstract hook: subclasses decide whether access is granted."""
        raise NotImplementedError('You must implement has_permission method!')
class Any(Permission):
    """Permission that grants access to everyone.

    NOTE(review): the name shadows ``typing.Any`` if both are imported
    into the same namespace.
    """
    def has_permission(self, *args, **kwargs):
        return True
| 25.25 | 78 | 0.676568 |
79581bf8a985adf122dd37d3d3a9bebcd3ec2fbc | 1,148 | py | Python | setup.py | shipyardapp/azurestorage-blueprints | 27f3a5e3e56b19c0ef414fae60dd77ac5ccb9d5a | [
"Apache-2.0"
] | 1 | 2021-02-06T23:01:47.000Z | 2021-02-06T23:01:47.000Z | setup.py | shipyardapp/azurestorage-blueprints | 27f3a5e3e56b19c0ef414fae60dd77ac5ccb9d5a | [
"Apache-2.0"
] | null | null | null | setup.py | shipyardapp/azurestorage-blueprints | 27f3a5e3e56b19c0ef414fae60dd77ac5ccb9d5a | [
"Apache-2.0"
] | 1 | 2021-02-06T23:01:52.000Z | 2021-02-06T23:01:52.000Z | from pathlib import Path
from pkg_resources import parse_requirements
from setuptools import find_packages, setup
# Collect install requirements from requirements.txt files found anywhere
# under the current directory.
# NOTE(review): each iteration overwrites ``install_requires``, so only the
# last requirements.txt found effectively wins -- confirm this is intended.
for path in Path('./').rglob('requirements.txt'):
    with Path(path).open() as requirements_txt:
        install_requires = [
            str(requirement)
            for requirement
            in parse_requirements(requirements_txt)
        ]
# Package metadata passed straight through to setuptools.setup().
config = {
    "description": "Simplified data pipeline blueprints for working with Azure Storage.",
    "author": "Shipyard Team",
    # NOTE(review): the URL contains stray spaces ("https: // ...") -- likely
    # a typo; left untouched here because it is a runtime string.
    "url": "https: // www.shipyardapp.com",
    "author_email": "tech@shipyardapp.com",
    "packages": find_packages(),
    "install_requires": install_requires,
    "name": "azurestorage-blueprints",
    "version": "v0.1.0",
    "license": "Apache-2.0",
    "classifiers": [
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Other Audience",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    "python_requires": ">=3.7"}
setup(**config)
| 31.027027 | 89 | 0.634146 |
79581c7d5a7a4131724b8d87e2964854566bb4aa | 1,356 | py | Python | antistasi_logbook/__main__.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | 2 | 2022-01-12T22:45:56.000Z | 2022-03-10T14:23:36.000Z | antistasi_logbook/__main__.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | null | null | null | antistasi_logbook/__main__.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | null | null | null | """
WiP.
Soon.
"""
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
from typing import TYPE_CHECKING
from pathlib import Path
# * Gid Imports ----------------------------------------------------------------------------------------->
from gidapptools import get_logger
from gidapptools.meta_data import get_meta_info, get_meta_paths
from gidapptools.meta_data.interface import get_meta_config
# * Local Imports --------------------------------------------------------------------------------------->
from antistasi_logbook import setup
from antistasi_logbook.gui.main_window import start_gui
setup()
# * Type-Checking Imports --------------------------------------------------------------------------------->
if TYPE_CHECKING:
from gidapptools.gid_config.interface import GidIniConfig
# endregion[Imports]
# region [TODO]
# endregion [TODO]
# region [Logging]
# endregion[Logging]
# region [Constants]
THIS_FILE_DIR = Path(__file__).parent.absolute()
META_PATHS = get_meta_paths()
META_INFO = get_meta_info()
CONFIG: "GidIniConfig" = get_meta_config().get_config('general')
CONFIG.config.load()
log = get_logger(__name__)
# endregion[Constants]
def main():
    """Entry point: launch the Antistasi Logbook GUI."""
    start_gui()
# region[Main_Exec]
if __name__ == '__main__':
    main()
# endregion[Main_Exec]
| 22.229508 | 108 | 0.570059 |
79581c87fe252e1ce90d856f64051955c87f1a7b | 2,376 | py | Python | Code/Dash App/app.py | zachlim98/carloancalc | d10e2de5748026e8fd60b6a9734f4666467c40fb | [
"MIT"
] | null | null | null | Code/Dash App/app.py | zachlim98/carloancalc | d10e2de5748026e8fd60b6a9734f4666467c40fb | [
"MIT"
] | null | null | null | Code/Dash App/app.py | zachlim98/carloancalc | d10e2de5748026e8fd60b6a9734f4666467c40fb | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
app = dash.Dash(__name__)
# Exposed for WSGI servers (e.g. gunicorn) to import.
server = app.server
# Static page layout: three numeric inputs (retail price, monthly expenses,
# interest rate) above the cost-vs-downpayment graph.
app.layout = html.Div([
    html.Div([
        dcc.Input(
            id='carprice',
            min=50000,
            value='',
            placeholder="Retail Price",
            type="number",
            style={'text-align':'center'}
        ),
        dcc.Input(
            id='monthexp',
            min=500,
            value='',
            placeholder="Monthly Expenses",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    html.Div([
        dcc.Input(
            id='intrate',
            min=0.01,
            value='',
            placeholder="Interest Rates (%)",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    html.Hr(),
    # Figure is filled in by the update_figure callback below.
    dcc.Graph(id='graph-car-price')
])
# Recompute the figure whenever any of the three inputs changes.
@app.callback(
    Output('graph-car-price', 'figure'),
    [Input('carprice', 'value'),
     Input('monthexp','value'),
     Input('intrate','value'),
     ])
def update_figure(carprice, monthexp, intrate):
    """Return a Plotly line figure: total cost of car vs. initial downpayment,
    one line per loan term (1-7 years), using simple (non-compounding)
    interest on the financed amount.

    NOTE(review): Dash fires callbacks with the initial input values before
    the user has typed anything, which would raise in int(carprice*0.3) --
    confirm whether an input guard is wanted.
    """
    # Candidate downpayments from 30% of retail price up to full price,
    # in $200 steps.
    downpayment_list = [i for i in range(int(carprice*0.3),int(carprice),200)]
    # create dataframe
    car_loan_df = pd.DataFrame({"Downpayment" : downpayment_list
                                })
    # add total cost of car to dataframe:
    # interest + principal + downpayment + monthly expenses, per term length z
    for z in range(1,8):
        car_loan_df["{} Year".format(z)] = [(((intrate/100)*z*(carprice - downpayment_list[i])+(carprice - downpayment_list[i])))+downpayment_list[i]+monthexp for i in range(0,len(downpayment_list))]
    # melt for easier plotting
    car_melt = pd.melt(car_loan_df, id_vars="Downpayment")
    fig = px.line(car_melt,x="Downpayment",y="value",color="variable",labels={
        "Downpayment": "Initial Downpayment",
        "value": "Total Cost of Car",
        "variable": "Loan Term"
    }, color_discrete_sequence=px.colors.qualitative.Bold)
    fig.update_layout({"plot_bgcolor":"white"})
    fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    # Smooth 0.5 s transition when the figure is recomputed.
    fig.update_layout(transition_duration=500)
    return fig
if __name__ == '__main__':
app.run_server | 31.263158 | 199 | 0.609428 |
79581c8d586c230b9e115f5f14ffe120b8a56341 | 9,243 | py | Python | python3_cron_scripts/fetch_azure_dns.py | bhumikaSinghal/Marinus | d64cf6217e422bb0be8b19f50b9a63e01d7b0783 | [
"Apache-2.0"
] | null | null | null | python3_cron_scripts/fetch_azure_dns.py | bhumikaSinghal/Marinus | d64cf6217e422bb0be8b19f50b9a63e01d7b0783 | [
"Apache-2.0"
] | null | null | null | python3_cron_scripts/fetch_azure_dns.py | bhumikaSinghal/Marinus | d64cf6217e422bb0be8b19f50b9a63e01d7b0783 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This script is for Azure customers who use Azure DNS and have credentials to the service.
The script will iterate through all zones for the configured subscription ID / tenant ID.
It will insert the identified public records uses the source of "azure-" + resourceGroups.
This script is based on the Azure Python SDK:
https://docs.microsoft.com/en-us/python/api/azure-mgmt-dns/azure.mgmt.dns?view=azure-python
"""
import logging
from datetime import datetime
from azure.mgmt.dns.models import ZoneType
from libs3 import AzureConnector, DNSManager, JobsManager, MongoConnector, ZoneIngestor
from libs3.LoggingUtil import LoggingUtil
from libs3.ZoneManager import ZoneManager
def split_id(url_id):
    """
    Data for the response is encoded in the ID URL

    Azure resource IDs look like /key1/value1/key2/value2/...; pair up the
    odd-indexed segments (keys) with the even-indexed ones (values).
    """
    segments = url_id.split("/")
    return dict(zip(segments[1::2], segments[2::2]))
def process_soa_record(logger, entry):
    """
    Convert the Azure SOA record object into Marinus information

    Returns a single-element list with the SOA fields space-joined in
    standard zone-file order.
    """
    soa = entry.soa_record
    soa_value = " ".join(
        [
            soa.host[:-1],
            soa.email,
            str(soa.serial_number),
            str(soa.refresh_time),
            str(soa.retry_time),
            str(soa.expire_time),
            str(soa.minimum_ttl),
        ]
    )
    logger.debug("SOA: " + soa_value)
    return [{"fqdn": entry.fqdn[:-1], "type": "soa", "value": soa_value}]
def process_arecords(logger, entry):
    """
    Convert the Azure A record object into Marinus information
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.arecords:
        logger.debug(f"A: {host} : {record.ipv4_address}")
        converted.append({"fqdn": host, "type": "a", "value": record.ipv4_address})
    return converted
def process_ns_records(logger, entry):
    """
    Convert the Azure NS record object into Marinus information

    The stored value has the trailing dot removed from the nameserver name.
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.ns_records:
        logger.debug(f"NS: {host} : {record.nsdname}")
        converted.append({"fqdn": host, "type": "ns", "value": record.nsdname[:-1]})
    return converted
def process_mx_records(logger, entry):
    """
    Convert the Azure MX record object into Marinus information

    The value is "<preference> <exchange>"; the exchange keeps its dot.
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.mx_records:
        mx_value = f"{record.preference} {record.exchange}"
        logger.debug(f"MX: {host} : {mx_value}")
        converted.append({"fqdn": host, "type": "mx", "value": mx_value})
    return converted
def process_cname_record(logger, entry):
    """
    Convert the Azure CNAME record object into Marinus information
    """
    host = entry.fqdn[:-1]
    target = entry.cname_record.cname
    logger.debug(f"CNAME: {host} : {target}")
    return [{"fqdn": host, "type": "cname", "value": target}]
def process_aaaa_records(logger, entry):
    """
    Convert the Azure AAAA record object into Marinus information
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.aaaa_records:
        logger.debug(f"AAAA: {host} : {record.ipv6_address}")
        converted.append({"fqdn": host, "type": "aaaa", "value": record.ipv6_address})
    return converted
def process_txt_records(logger, entry):
    """
    Convert the Azure TXT record object into Marinus information

    Each TXT record's chunk list is concatenated into one string value.
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.txt_records:
        text_value = "".join(record.value)
        logger.debug(f"TXT: {host} : {text_value}")
        converted.append({"fqdn": host, "type": "txt", "value": text_value})
    return converted
def process_ptr_records(logger, entry):
    """
    Convert the Azure PTR record object into Marinus information

    :param logger: logger used for debug tracing
    :param entry: Azure RecordSet with .fqdn and .ptr_records
    :return: list of {"fqdn", "type", "value"} dicts (ptrdname kept verbatim)
    """
    host = entry.fqdn[:-1]
    results = []
    for ptr_record in entry.ptr_records:
        # Consistency fix: log the trimmed FQDN like every other record
        # processor in this module (previously logged the dotted form).
        logger.debug("PTR: " + host + " : " + ptr_record.ptrdname)
        results.append({"fqdn": host, "type": "ptr", "value": ptr_record.ptrdname})
    return results
def process_srv_records(logger, entry):
    """
    Convert the Azure SRV record object into Marinus information

    The value is "<priority> <weight> <port> <target>".
    """
    host = entry.fqdn[:-1]
    converted = []
    for record in entry.srv_records:
        srv_value = f"{record.priority} {record.weight} {record.port} {record.target}"
        logger.debug(f"SRV: {srv_value}")
        converted.append({"fqdn": host, "type": "srv", "value": srv_value})
    return converted
def extract_record_set_value(logger, field, entry):
    """
    Call the appropriate function for the given field type.

    Returns the converted record list, or None (after a warning) for an
    unrecognized record type.
    """
    # NOTE: "A" maps to process_arecords -- the missing underscore mirrors
    # the Azure SDK's inconsistent attribute naming.
    dispatch = {
        "A": process_arecords,
        "AAAA": process_aaaa_records,
        "MX": process_mx_records,
        "NS": process_ns_records,
        "PTR": process_ptr_records,
        "SRV": process_srv_records,
        "TXT": process_txt_records,
        "CNAME": process_cname_record,
        "SOA": process_soa_record,
    }
    handler = dispatch.get(field)
    if handler is None:
        logger.warning("Unknown Record Set Type")
        return None
    return handler(logger, entry)
def main():
    """
    Begin Main...

    Enumerates every Azure DNS zone for the configured subscription,
    ingests any previously-unseen public zone, and inserts each record set
    into the Marinus DNS collection with source "azure:<resourceGroup>".
    """
    logger = LoggingUtil.create_log(__name__)
    now = datetime.now()
    print("Starting: " + str(now))
    logger.info("Starting...")
    # Service handles: Azure SDK clients plus Marinus Mongo-backed managers.
    azure_connector = AzureConnector.AzureConnector()
    mongo_connector = MongoConnector.MongoConnector()
    dns_manager = DNSManager.DNSManager(mongo_connector)
    zone_ingestor = ZoneIngestor.ZoneIngestor()
    jobs_manager = JobsManager.JobsManager(mongo_connector, "fetch_azure_dns")
    jobs_manager.record_job_start()
    current_zones = ZoneManager.get_distinct_zones(mongo_connector)
    resource_client = azure_connector.get_resources_client()
    resources = []
    # The resource list is not currently used.
    for item in resource_client.resource_groups.list():
        resources.append(item.name)
    dns_client = azure_connector.get_dns_client()
    zones = dns_client.zones.list()
    # The type of records the Azure DNS will let you configure
    record_types = {
        "A": "arecords",
        "AAAA": "aaaa_records",
        "MX": "mx_records",
        "NS": "ns_records",
        "PTR": "ptr_records",
        "SRV": "srv_records",
        "TXT": "txt_records",
        "CNAME": "cname_record",
        "SOA": "soa_record",
    }
    for zone in zones:
        logger.info("Zone: " + zone.name)
        data = split_id(zone.id)
        # Only public zones are relevant; private zones are skipped.
        if zone.zone_type == ZoneType.public:
            logger.info(zone.name + " is public:")
            if zone.name not in current_zones:
                logger.debug("Creating zone: " + zone.name)
                zone_ingestor.add_zone(zone.name, "azure:" + data["resourceGroups"])
            try:
                logger.info("ResourceGroup: " + data["resourceGroups"])
                records = dns_client.record_sets.list_all_by_dns_zone(
                    data["resourceGroups"], zone.name
                )
                for entry in records:
                    # The record_data id value ends in rtype/rvalue so you must guess the rtype
                    record_data = split_id(entry.id)
                    for rtype in record_types:
                        if rtype in record_data:
                            results = extract_record_set_value(logger, rtype, entry)
                            for result in results:
                                result["zone"] = zone.name
                                result["created"] = datetime.now()
                                result["status"] = "confirmed"
                                dns_manager.insert_record(
                                    result, "azure:" + data["resourceGroups"]
                                )
            # NOTE(review): this bare except also hides genuine SDK/API
            # errors, not just an empty record set -- consider narrowing.
            except:
                logger.warning("No records found")
    jobs_manager.record_job_complete()
    now = datetime.now()
    print("Complete: " + str(now))
    logger.info("Complete.")
if __name__ == "__main__":
main()
| 31.65411 | 95 | 0.610624 |
79581df82df351ea359a0c321dbc3861f3812a4f | 1,533 | py | Python | profiles_api/views.py | snidu001/rest-api | 75aa4368546de306fa3ac32ff437b29adff6ff02 | [
"MIT"
] | null | null | null | profiles_api/views.py | snidu001/rest-api | 75aa4368546de306fa3ac32ff437b29adff6ff02 | [
"MIT"
] | 7 | 2019-12-05T00:00:31.000Z | 2022-02-10T10:19:20.000Z | profiles_api/views.py | snidu001/rest-api | 75aa4368546de306fa3ac32ff437b29adff6ff02 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from profiles_api import serializers
class HelloApiView(APIView):
    """Test API View demonstrating the basic DRF APIView handlers."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features."""
        feature_list = [
            'Uses HTTP methods as function (get,post,patch,put,delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': feature_list})

    def post(self, request):
        """Create a hello message with our name."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST,
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def put(self, request, pk=None):
        """Handle updating an object."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle partial update of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object."""
        return Response({'method': 'DELETE'})
| 31.9375 | 72 | 0.617743 |
79581e72b18ec7765bc0c2996ad51066eecc9e58 | 356 | py | Python | presentations/session_2/redis_examples/redis_others/redis_sub_py3.py | NanoDataCenter/nano_data_center | 76ad521e1a5139a37df80214af1413d2fd4ade60 | [
"MIT"
] | 2 | 2018-02-21T03:46:51.000Z | 2019-12-24T16:40:51.000Z | presentations/session_2/redis_examples/redis_others/redis_sub_py3.py | NanoDataCenter/nano_data_center | 76ad521e1a5139a37df80214af1413d2fd4ade60 | [
"MIT"
] | 7 | 2020-07-16T19:54:08.000Z | 2022-03-02T03:29:07.000Z | presentations/session_2/redis_examples/redis_others/redis_sub_py3.py | NanoDataCenter/nano_data_center | 76ad521e1a5139a37df80214af1413d2fd4ade60 | [
"MIT"
] | 2 | 2018-04-16T07:02:35.000Z | 2020-07-23T21:57:19.000Z | import redis
import json
import time
# NOTE(review): json and time are imported but unused in this script.
# Blocking Redis pub/sub consumer: subscribes to the "redis_pub" channel on
# a local Redis instance and prints every message it receives, forever.
redis_handle = redis.StrictRedis("127.0.0.1", 6379 ,0,decode_responses = True )
subscribe_object = redis_handle.pubsub()
subscribe_object.subscribe("redis_pub")
#
# Can add many keys or channels
#
while True:
    for item in subscribe_object.listen(): ## iterate over the channels
        print( item )
79581f42dfd249d1d5dcbcb056e16e30ec5384e3 | 1,731 | py | Python | TestFromZHJW.py | AnCoSONG/SCUCaptchaRecognizer | 09e1255d34cfc576eba451a72bf7a988c4893f9b | [
"MIT"
] | 1 | 2019-07-01T08:23:16.000Z | 2019-07-01T08:23:16.000Z | TestFromZHJW.py | AnCoSONG/SCUCaptchaRecognizer | 09e1255d34cfc576eba451a72bf7a988c4893f9b | [
"MIT"
] | 1 | 2019-08-09T14:13:25.000Z | 2019-08-09T14:13:25.000Z | TestFromZHJW.py | AnCoSONG/SCUCaptchaRecognizer | 09e1255d34cfc576eba451a72bf7a988c4893f9b | [
"MIT"
] | null | null | null | from keras import models
import cv2 as cv
import os
import numpy as np
import requests
import random
import matplotlib.pyplot as plt
# Load the four per-position classifiers (one model per captcha character).
try:
    model1 = models.load_model('models/0PosRecognize.h5')
    model2 = models.load_model('models/1PosRecognize.h5')
    model3 = models.load_model('models/2PosRecognize.h5')
    model4 = models.load_model('models/3PosRecognize.h5')
    print('Load Successfully!')
except:
    print("Load Unsuccessfully!Please train a new model!")
# Fetch a fresh captcha from the SCU portal; the random query-string value
# acts as a cache buster.
rand_num = random.randint(0,1000)
base_url = "http://zhjw.scu.edu.cn/img/captcha.jpg?"
intact_url = base_url+str(rand_num)
ret = requests.get(intact_url)
if ret.status_code == 200:
    with open('captcha.jpg','wb') as f:
        for chuck in ret:
            f.write(chuck)
# Convert BGR->RGB and resize to 96x32 (width, height) before prediction.
img = cv.imread('captcha.jpg')
img = cv.cvtColor(img,cv.COLOR_BGR2RGB)
img = cv.resize(img, (96,32))
# Image preprocessing: scale pixel values to [0, 1].
img_normalize = img.astype('float32')/255
t = []
t.append(img_normalize)
test = np.array(t)
# Predict one character class per captcha position.
pos0 = model1.predict_classes(test)
pos1 = model2.predict_classes(test)
pos2 = model3.predict_classes(test)
pos3 = model4.predict_classes(test)
def code2name(code):
    """Map a class index (0-35) to its captcha character.

    Classes 0-9 are the digits '0'-'9'; classes 10-35 are the lowercase
    letters 'a'-'z'.

    :param code: integer-like class index predicted by the model
    :return: the corresponding single character
    :raises IndexError: if the index falls outside 0-35
    """
    # A string doubles as an index->char lookup table; this also avoids
    # shadowing the built-in name ``dict`` as the original list did.
    charset = '0123456789abcdefghijklmnopqrstuvwxyz'
    return charset[int(code)]
res = code2name(*pos0)+code2name(*pos1)+code2name(*pos2)+code2name(*pos3)
import matplotlib.pyplot as plt
def plot_images_prediction(img, res):
    """Render the captcha image titled with the CNN's predicted string."""
    plt.figure('Result Of CNN')
    plt.imshow(img)
    plt.title(f"Prediction {res}")
    plt.show()
# plt.figure()
# plt.imshow(img)
# plt.show()
| 22.776316 | 73 | 0.632582 |
79581f99f8da0e86a08f1ffed0ec5eb7053d6c45 | 11,303 | py | Python | spec_cleaner/rpmregexp.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | spec_cleaner/rpmregexp.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | spec_cleaner/rpmregexp.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | # vim: set ts=4 sw=4 et: coding=UTF-8
import re
from typing import List
class Regexp(object):
    """Namespace of regular expressions precompiled once at import time.

    The patterns are class attributes, so every consumer shares the same
    compiled objects and no pattern is compiled twice.  (Despite the
    original wording, this is an ordinary class rather than an enforced
    singleton; instantiating it only adds ``re_unbrace_keywords``.)
    """
    # section macros
    re_spec_package = re.compile(r'^%package(\s+|$)', re.IGNORECASE)
    re_spec_description = re.compile(r'^%description(\s+|$)', re.IGNORECASE)
    re_spec_prep = re.compile(r'^%prep\s*$', re.IGNORECASE)
    re_spec_build = re.compile(r'^%build\s*$', re.IGNORECASE)
    re_spec_install = re.compile(r'^%install\s*$', re.IGNORECASE)
    re_spec_clean = re.compile(r'^%clean\s*$', re.IGNORECASE)
    re_spec_check = re.compile(r'^%check\s*$', re.IGNORECASE)
    re_spec_scriptlets = re.compile(
        r'(?:^%pretrans(\s+|$))|(?:^%pre(\s+|$))|(?:^%post(\s+|$))|(?:^%verifyscript(\s+|$))|(?:^%preun(\s+|$))|(?:^%postun(\s+|$))|(?:^%posttrans(\s+|$))',
        re.IGNORECASE,
    )
    re_spec_triggers = re.compile(
        r'(?:^%filetriggerin(\s+|$))|(?:^%filetriggerun(\s+|$))|(?:^%filetriggerpostun(\s+|$))|(?:^%transfiletriggerin(\s+|$))|(?:^%transfiletriggerun(\s+|$))|(?:^%transfiletriggerpostun(\s+|$))',
        re.IGNORECASE,
    )
    re_spec_files = re.compile(r'^%files(\s+|$)', re.IGNORECASE)
    re_spec_changelog = re.compile(r'^%changelog\s*$', re.IGNORECASE)
    # rpmpreamble
    # WARNING: keep in sync with rpmcleaner Section change detection
    re_if = re.compile(r'^\s*(?:%{?if\s|%{?ifarch\s|%{?ifnarch\s|%{?if\S*}?(\s.*|)$)', re.IGNORECASE)
    re_codeblock = re.compile(r'^\s*((### COMMON-([a-zA-Z0-9]+)-BEGIN ###|# MANUAL BEGIN|# SECTION)(\s.*|)|# MANUAL)$', re.IGNORECASE)
    re_else = re.compile(r'^\s*%else(\s.*|)$', re.IGNORECASE)
    re_endif = re.compile(r'^\s*%endif(\s.*|)$', re.IGNORECASE)
    re_endcodeblock = re.compile(r'^\s*(### COMMON-([a-zA-Z0-9]+)-END ###|# MANUAL END|# /MANUAL|# (END|/)SECTION)(\s.*|)$', re.IGNORECASE)
    re_name = re.compile(r'^Name:\s*(\S*)', re.IGNORECASE)
    re_version = re.compile(r'^Version:\s*(.*)', re.IGNORECASE)
    re_release = re.compile(r'^Release:\s*(\S*)', re.IGNORECASE)
    re_license = re.compile(r'^License:\s*(.*)', re.IGNORECASE)
    re_summary = re.compile(r'^Summary:\s*(.*)', re.IGNORECASE)
    re_summary_localized = re.compile(r'^Summary(\(\S+\)):\s*(.*)', re.IGNORECASE)
    re_url = re.compile(r'^Url:\s*(\S*)', re.IGNORECASE)
    re_group = re.compile(r'^Group:\s*(.*)', re.IGNORECASE)
    re_vendor = re.compile(r'^Vendor:\s*(.*)', re.IGNORECASE)
    re_source = re.compile(r'^Source(\d*):\s*(.*)', re.IGNORECASE)
    re_nosource = re.compile(r'^NoSource:\s*(.*)', re.IGNORECASE)
    re_patch = re.compile(r'^((?:#[#\s]*)?)Patch(\d*):\s*(\S*)', re.IGNORECASE)
    re_buildrequires = re.compile(r'^(BuildRequires|BuildPreReq):\s*(.*)', re.IGNORECASE)
    re_buildconflicts = re.compile(r'^BuildConflicts:\s*(.*)', re.IGNORECASE)
    re_buildignores = re.compile(r'^#!BuildIgnore:\s*(.*)', re.IGNORECASE)
    re_prereq = re.compile(r'^PreReq:\s*(.*)', re.IGNORECASE)
    re_requires = re.compile(r'^Requires:\s*(.*)', re.IGNORECASE)
    re_requires_phase = re.compile(r'^Requires(\([^)]+\)):\s*(.*)', re.IGNORECASE)
    re_recommends = re.compile(r'^Recommends:\s*(.*)', re.IGNORECASE)
    re_suggests = re.compile(r'^Suggests:\s*(.*)', re.IGNORECASE)
    re_enhances = re.compile(r'^Enhances:\s*(.*)', re.IGNORECASE)
    re_supplements = re.compile(r'^Supplements:\s*(.*)', re.IGNORECASE)
    re_conflicts = re.compile(r'^Conflicts:\s*(.*)', re.IGNORECASE)
    re_provides = re.compile(r'^Provides:\s*(.*)', re.IGNORECASE)
    re_obsoletes = re.compile(r'^Obsoletes:\s*(.*)', re.IGNORECASE)
    re_buildroot = re.compile(r'^\s*BuildRoot:', re.IGNORECASE)
    re_buildarch = re.compile(r'^\s*BuildArch(itectures)?:\s*(.*)', re.IGNORECASE)
    re_exclusivearch = re.compile(r'^\s*ExclusiveArch(itectures)?:\s*(.*)', re.IGNORECASE)
    re_excludearch = re.compile(r'^\s*ExcludeArch(itectures)?:\s*(.*)', re.IGNORECASE)
    re_epoch = re.compile(r'^\s*Epoch:\s*(.*)', re.IGNORECASE)
    re_icon = re.compile(r'^\s*Icon:\s*(.*)', re.IGNORECASE)
    re_copyright = re.compile(r'^\s*Copyright:\s*(.*)', re.IGNORECASE)
    re_packager = re.compile(r'^\s*Packager:\s*(.*)', re.IGNORECASE)
    re_define = re.compile(r'^\s*%define\s*(.*)', re.IGNORECASE)
    re_global = re.compile(r'^\s*%global\s*(.*)', re.IGNORECASE)
    re_bcond_with = re.compile(r'^\s*%bcond_with(out)?\s*(.*)', re.IGNORECASE)
    re_autoreqprov = re.compile(r'^\s*AutoReqProv:.*$', re.IGNORECASE)
    re_debugpkg = re.compile(r'^%{?(debug_package|___debug_install_post)}?\s*$', re.IGNORECASE)
    re_py_requires = re.compile(r'^%{?\??py_requires}?\s*$', re.IGNORECASE)
    re_mingw = re.compile(r'^\s*%{?_mingw.*$', re.IGNORECASE)
    re_patterndefine = re.compile(r'^\s*%{?pattern_\S+}?\s*$', re.IGNORECASE)
    re_patternmacro = re.compile(r'pattern(-\S+)?\(\)', re.IGNORECASE)
    re_patternobsolete = re.compile(r'patterns-openSUSE-\S+', re.IGNORECASE)
    re_tail_macros = re.compile(r'^%{?python_subpackages}?')
    re_preamble_prefix = re.compile(r'^Prefix:\s*(.*)', re.IGNORECASE)
    # grab all macros with rpm call that query for version, this still might
    # be bit too greedy but it is good enough now
    re_rpm_command = re.compile(r'%\(\s*(rpm|echo\s+`rpm).*--queryformat\s+\'%{?VERSION}?\'.*\)')
    re_requires_eq = re.compile(r'^\s*(%{\?requires_eq:\s*)?%requires_eq\s*(.*)')
    re_requires_ge = re.compile(r'^\s*(%{\?requires_ge:\s*)?%requires_ge\s*(.*)')
    re_onelinecond = re.compile(r'^\s*%{!?[^?]*\?[^:]+:[^}]+}')
    # Special bracketed deps dection
    re_brackety_requires = re.compile(r'(pkgconfig|cmake|perl|tex|rubygem)\(')
    re_version_separator = re.compile(r'(\S+)((\s*[<>=\s]+)(\S+))*')
    # packageand(pkg1:pkg2)
    re_packageand = re.compile(r'^packageand\(\s*(\S+)\s*:\s*(\S+)\s*\)\s*$')
    # otherproviders(foo)
    re_otherproviders = re.compile(r'^otherproviders\(\s*(\S+)\s*\)\s*$')
    # rpmdescription
    re_authors = re.compile(r'^\s*Author(s)?:\s*')
    # rpmbuild
    re_jobs = re.compile(r'%{?(_smp_mflags|\?_smp_flags|\?jobs:\s*-j\s*%(jobs|{jobs}))}?')
    re_make = re.compile(r'(^\s*)make(\s.*|)$')
    re_optflags_quotes = re.compile(r'=\s*\${?RPM_OPT_FLAGS}?\s*$')
    re_optflags = re.compile(r'\${?RPM_OPT_FLAGS}?')
    re_suseupdateconfig = re.compile(r'%{?\??suse_update_config')
    re_configure = re.compile(r'(^|(.*\s)?)./configure(\s.*|)$')
    re_cmake = re.compile(r'(^|(.*\s)?)cmake(\s.*|)$')
    re_qmake5 = re.compile(r'(^|(.*\s)?)qmake-qt5(\s.*|)$')
    re_meson = re.compile(r'(^|(.*\s)?)meson(\s.*|)$')
    re_pytest = re.compile(r'%python_(expand|exec)\s+(PYTHONPATH=%{buildroot}%{\$?python_sitelib}\s+)?(\$?python\s+)?(%{_bindir}/?|-m\s+)?py\.?test(-(%{\$?python_version}|%{\$?python_bin_suffix})?)?(\s+(-v|-o addopts=-v))?')
    re_pytest_arch = re.compile(r'%python_(expand|exec)\s+(PYTHONPATH=%{buildroot}%{\$?python_sitearch}\s+)?(\$?python\s+)?(%{_bindir}/?|-m\s+)?py\.?test(-(%{\$?python_version}|%{\$?python_bin_suffix})?)?(\s+(-v|-o addopts=-v))?')
    re_python_expand = re.compile(r'%{?(python_sitelib|python_sitearch|python_bin_suffix|python_version)}?')
    re_python_interp_expand = re.compile(r'\s+(python)\s+')
    # rpmcopyright
    re_copyright_string = re.compile(r'^#\s*Copyright\ \(c\)\s*(.*)', re.IGNORECASE)
    re_suse_copyright = re.compile(r'SUSE (LLC\.?|LINUX (Products )?GmbH, Nuernberg, Germany\.)\s*$', re.IGNORECASE)
    re_rootforbuild = re.compile(r'^#\s*needsrootforbuild\s*$', re.IGNORECASE)
    re_binariesforbuild = re.compile(r'^#\s*needsbinariesforbuild\s*$', re.IGNORECASE)
    re_nodebuginfo = re.compile(r'^#\s*nodebuginfo\s*$', re.IGNORECASE)
    re_sslcerts = re.compile(r'^#\s*needssslcertforbuild\s*$', re.IGNORECASE)
    re_icecream = re.compile(r'^#\s*icecream\s*$', re.IGNORECASE)
    re_vimmodeline = re.compile(r'^#\s*vim:', re.IGNORECASE)
    re_skipcleaner = re.compile(r'^#\s*nospeccleaner\s*$', re.IGNORECASE)
    # rpminstall
    re_clean = re.compile(r'rm\s+(-?\w?\ ?)*"?(%{buildroot}|\$b)"?$')
    re_install = re.compile(r'{0}*(%{{makeinstall}}|make{0}+install){0}*$'.format(r'(DESTDIR=%{buildroot}|%{\?_smp_mflags}|\s|V=1|VERBOSE=1|-j\d+)'))
    re_rm = re.compile(r'rm\s+(-?\w?\ ?)*"?(%{buildroot}|\$b)"?/?"?%{_lib(dir)?}.*\*\.la;?$')
    re_find = re.compile(r'find\s+"?(%{buildroot}|\$b)("?\S?/?)*\s*.*\s+-i?name\s+["\'\\]?\*\.la($|.*[^\\]$)')
    re_find_double = re.compile(r'-i?name')
    re_rm_double = re.compile(r'(\.|{)a')
    # rpmprep
    re_patch_prep = re.compile(r'^%patch\s*([^P]*)-P\s*(\d*)\s*([^P]*)$')
    re_setup = re.compile(r'\s*-n\s+"?%{name}-%{version}"?($|\s)')
    # rpmfiles
    re_man_compression = re.compile(r'(\d)(\.?\*|\.gz|%{?ext_man}?)$')
    re_info_compression = re.compile(r'\.info(\.?\*|\.gz|%{?ext_info}?)$')
    re_defattr = re.compile(r'^\s*%defattr\s*\(\s*-\s*,\s*root\s*,\s*root\s*(,\s*-\s*)?\)\s*')
    re_doclicense = re.compile(r'(\S+)?(LICEN(S|C)E|COPYING)(\*|\.(\*|\S+))?($|\s)', re.IGNORECASE)
    # rpmscriptlets
    re_ldconfig = re.compile(r'(^|(.*\s)?)%{?run_ldconfig}?(\s.*|)$', re.IGNORECASE)
    # patches/sources
    re_ptch = re.compile(r'%{P:(\d+)}')
    re_src = re.compile(r'%{S:(\d+)}')
    # comment detection
    re_comment = re.compile(r'^$|^\s*#')
    # macro detection
    re_macro = re.compile(
        # find start of macro:
        # either beggining of string or something which is not '%' or :
        # where : is used after macro declaration we should not curlify
        r'(^|([^%:]))'
        +
        # macro itself:
        # '%' followed by either number not starting with '0'
        # or by chars where first is a-z or A-Z or underscore
        r'%([1-9]\d*|[a-zA-Z_]\w*'
        +
        # possibly followed by parens
        r'(\s*\([^)]*\))?'
        +
        # beyond the end of the macro
        r')(|(\W))'
    )
    # cleaning path regexps
    endmacro = r'([/\s%"]|$)'
    re_oldprefix = re.compile(r'%{?_exec_prefix}?' + endmacro)
    re_prefix = re.compile(r'(?<!\w)/usr' + endmacro)
    re_bindir = re.compile(r'%{?_prefix}?/bin' + endmacro)
    re_sbindir = re.compile(r'%{?_prefix}?/sbin' + endmacro)
    re_libexecdir = re.compile(r'%{?_prefix}?/libexec' + endmacro)
    re_includedir = re.compile(r'%{?_prefix}?/include' + endmacro)
    re_datadir = re.compile(r'%{?_prefix}?/share' + endmacro)
    re_mandir = re.compile(r'%{?_datadir}?/man' + endmacro)
    re_infodir = re.compile(r'%{?_datadir}?/info' + endmacro)
    re_docdir = re.compile(r'%{?_datadir}?/doc/packages' + endmacro)
    re_initdir = re.compile(r'/etc/init.d' + endmacro)
    re_sysconfdir = re.compile(r'/etc' + endmacro)
    re_localstatedir = re.compile(r'/var' + endmacro)
    re_libdir = re.compile(r'%{?_prefix}?/(%{?_lib}?|lib64)' + endmacro)
    re_initddir = re.compile(r'%{?_initrddir}?' + endmacro)
    re_rpmbuildroot = re.compile(r'(\${?RPM_BUILD_ROOT}?|"%{?buildroot}?")([/\s%]|$)')
    re_rpmbuildroot_quotes = re.compile(r'"\${?RPM_BUILD_ROOT}?"')
    # deprecated greps
    re_deprecated_egrep_regex = re.compile(r'\begrep\b')
    re_deprecated_fgrep_regex = re.compile(r'\bfgrep\b')
    def __init__(self, keywords: List[str]) -> None:
        """Compile the macro-unbracing pattern for the given keyword list.

        NOTE(review): the keywords are joined into the pattern without
        re.escape -- assumes they contain no regex metacharacters.
        """
        self.re_unbrace_keywords = re.compile('%{(' + '|'.join(keywords) + ')}')
| 56.798995 | 230 | 0.604353 |
79582095ae31a63762ec600492e7931b7b204dce | 2,863 | py | Python | examples/views/persistent.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | [
"MIT"
] | null | null | null | examples/views/persistent.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | [
"MIT"
] | null | null | null | examples/views/persistent.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | [
"MIT"
] | null | null | null | from disnake.ext import commands
import disnake
# Define a simple View that persists between bot restarts
# In order a view to persist between restarts it needs to meet the following conditions:
# 1) The timeout of the View has to be set to None
# 2) Every item in the View has to have a custom_id set
# It is recommended that the custom_id be sufficiently unique to
# prevent conflicts with other buttons the bot sends.
# For this example the custom_id is prefixed with the name of the bot.
# Note that custom_ids can only be up to 100 characters long.
class PersistentView(disnake.ui.View):
    """View whose buttons keep working across bot restarts.

    ``timeout=None`` plus a fixed ``custom_id`` on every item are the two
    requirements for persistence (see the module comments above).
    """
    def __init__(self):
        # timeout=None is mandatory for a persistent view.
        super().__init__(timeout=None)
    @disnake.ui.button(label='Green', style=disnake.ButtonStyle.green, custom_id='persistent_view:green')
    async def green(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
        """Reply ephemerally when the green button is pressed."""
        await interaction.response.send_message('This is green.', ephemeral=True)
    @disnake.ui.button(label='Red', style=disnake.ButtonStyle.red, custom_id='persistent_view:red')
    async def red(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
        """Reply ephemerally when the red button is pressed."""
        await interaction.response.send_message('This is red.', ephemeral=True)
    @disnake.ui.button(label='Grey', style=disnake.ButtonStyle.grey, custom_id='persistent_view:grey')
    async def grey(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
        """Reply ephemerally when the grey button is pressed."""
        await interaction.response.send_message('This is grey.', ephemeral=True)
class PersistentViewBot(commands.Bot):
    """Bot that registers the persistent view listener once it is ready."""

    def __init__(self):
        super().__init__(command_prefix=commands.when_mentioned_or('$'))
        # on_ready can fire more than once per process; this flag makes the
        # view registration happen at most once.
        self.persistent_views_added = False

    async def on_ready(self):
        already_registered = self.persistent_views_added
        if not already_registered:
            # add_view() only registers the component listener -- it does not
            # send the view to any message.  A message_id keyword could be
            # passed here if one were known.
            self.add_view(PersistentView())
            self.persistent_views_added = True
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
    """Starts a persistent view."""
    # In order for a persistent view to be listened to, it needs to be sent to an actual message.
    # Call this method once just to store it somewhere.
    # In a more complicated program you might fetch the message_id from a database for use later.
    # However this is outside of the scope of this simple example.
    await ctx.send("What's your favourite colour?", view=PersistentView())
# NOTE(review): 'token' is a placeholder -- supply a real bot token.
bot.run('token')
| 44.734375 | 105 | 0.718128 |
79582099b489842d653231894ac98c81eef1d1d7 | 1,986 | py | Python | models/data_utils/data_read_utils.py | aman313/deep_generalization | 2ef2a731a1b2e5c3ce16c7b66de86c57dba08a37 | [
"MIT"
] | null | null | null | models/data_utils/data_read_utils.py | aman313/deep_generalization | 2ef2a731a1b2e5c3ce16c7b66de86c57dba08a37 | [
"MIT"
] | null | null | null | models/data_utils/data_read_utils.py | aman313/deep_generalization | 2ef2a731a1b2e5c3ce16c7b66de86c57dba08a37 | [
"MIT"
] | null | null | null | import pandas as pd
import torch
import numpy as np
import ast
def one_hot_transformer(vocab):
    """Build a one-hot character encoder over *vocab*.

    Returns a function ``trans(text, max_len)`` producing a
    ``(max_len, len(vocab))`` float tensor whose row *i* one-hot encodes
    ``text[i]``; rows beyond ``len(text)`` stay zero (padding).

    The returned encoder raises ``ValueError`` (chained from the underlying
    ``KeyError``) for characters outside *vocab* -- the original printed the
    character and raised a bare ``Exception``, discarding the cause.  The
    inner parameter no longer shadows the builtin ``str``.
    """
    vocab_index = {elem: index for index, elem in enumerate(vocab)}

    def trans(text, max_len):
        one_hot = torch.zeros(max_len, len(vocab))
        for i, char in enumerate(text):
            try:
                one_hot[i, vocab_index[char]] = 1
            except KeyError as exc:
                raise ValueError(f'element not in vocab: {char!r}') from exc
        return one_hot

    return trans
def batched_data_generator_from_file_with_replacement(file_name, batch_size, num_batches, transformer, data_type=np.int64):
    """Create a generator factory yielding random batches from a CSV file.

    The CSV must have columns ``X`` and ``y``.  Each of the *num_batches*
    batches samples *batch_size* rows with replacement, sorts them by the
    length of ``str(X)`` (longest first), encodes every sample padded to the
    batch's maximum length via ``transformer(text, max_len)``, and yields
    ``(encoded_inputs, torch.FloatTensor(y))``.

    The leftover debug ``print(X[0])`` from the original has been removed.
    """
    data = pd.read_csv(file_name, dtype={'X': data_type, 'y': data_type})

    def generate_batches():
        for _ in range(num_batches):
            batch_data = data.sample(n=batch_size, replace=True)
            X = batch_data.X.tolist()
            y = batch_data.y.tolist()
            # Joint sort by input length, longest first.
            X, y = zip(*sorted(zip(X, y), key=lambda pair: len(str(pair[0])), reverse=True))
            max_len = max(len(str(x)) for x in X)
            yield ([transformer(str(x), max_len) for x in X], torch.FloatTensor(y))

    return generate_batches
def batched_data_generator_from_file_with_replacement_for_string_to_seq_of_tuples(file_name, batch_size, num_batches, transformer):
    """Like ``batched_data_generator_from_file_with_replacement``, but the
    ``y`` column holds string-encoded sequences of literals (e.g. ``"(1, 2)"``)
    parsed with :func:`ast.literal_eval` before tensor conversion.

    Fix: the original passed ``np.str`` as the ``y`` dtype; that alias was
    removed in NumPy 1.20, so the builtin ``str`` is used instead.
    """
    data = pd.read_csv(file_name, dtype={'X': np.int64, 'y': str})

    def generate_batches():
        for _ in range(num_batches):
            batch_data = data.sample(n=batch_size, replace=True)
            X = batch_data.X.tolist()
            y = [ast.literal_eval(raw) for raw in batch_data.y.tolist()]
            # Joint sort by input length, longest first.
            X, y = zip(*sorted(zip(X, y), key=lambda pair: len(str(pair[0])), reverse=True))
            max_len = max(len(str(x)) for x in X)
            yield ([transformer(str(x), max_len) for x in X], torch.FloatTensor(y))

    return generate_batches
795820c4a124f48fcd1c320c131a4a4d1be85067 | 1,920 | py | Python | projects/socketio_basic/app.py | basavyr/flask-development-tutorials | aa6081416f72b14a33fdf5294577e2a459487d84 | [
"MIT"
] | null | null | null | projects/socketio_basic/app.py | basavyr/flask-development-tutorials | aa6081416f72b14a33fdf5294577e2a459487d84 | [
"MIT"
] | 3 | 2022-03-22T10:15:55.000Z | 2022-03-22T18:17:39.000Z | projects/socketio_basic/app.py | basavyr/flask-development-tutorials | aa6081416f72b14a33fdf5294577e2a459487d84 | [
"MIT"
] | null | null | null | # Start with a basic flask app webpage.
from flask_socketio import SocketIO
from flask_socketio import send, emit
from flask import Flask, render_template, url_for, copy_current_request_context
from random import random
from time import sleep
from threading import Thread, Event
import src.local_tools as tools
# define the port and host that the app will run on
PORT = 6969
LOCALHOST = '127.0.0.1'
# define the flask app
app = Flask(__name__)
# The secret key signs the session cookie -- replace before production use.
app.config['SECRET_KEY'] = 'secret!'
# app.config['DEBUG'] = True
# define the socketio object
socketio = SocketIO(app)
# define the main page
@app.route("/", methods=['GET'])
def show_index():
    """Render the landing page, passing the current server time."""
    return render_template('index.html', time=tools.get_time())
@socketio.on('connect')
def test_connect():
    """Greet a newly connected client with a timestamped handshake payload."""
    handshake = {
        'message': 'Connection Established',
        'time': tools.get_time(),
    }
    # this sends a dict to the client
    emit('connection_response', handshake)
@socketio.on('disconnect')
def test_disconnect():
    """Deliberate no-op handler for client disconnects."""
    pass
    # print('Client disconnected')
# define the three test channels
@socketio.event
def channel1(data):
    """Echo the payload back on the 'channel1 response' event."""
    payload = {'data': data}
    emit('channel1 response', payload)
@socketio.on('channel2')
def channel2(data):
    """Log the incoming payload and reply with a timestamped envelope."""
    print('server -> received args on channel2 from client: ' + data)
    reply = {
        'time': tools.get_time(),
        'user': tools.get_uname(),
        'content': data,
    }
    emit('channel2 response', reply)
@socketio.event
def channel3(data):
    """Echo the payload back on the 'channel3 response' event."""
    emit('channel3 response', {'data': data})
# log any incoming message that was emitted by the client
@socketio.on('message')
def handle_unnamed_message(data):
    """Log an unnamed client message and echo it back on 'message'."""
    print('...(received from client)...')
    print(f'Server-side message: {data}')
    print('............................')
    emit('message', {'data': data})
def main():
    """Run the SocketIO development server on the configured host/port."""
    # app.run(debug=True, port=PORT)
    socketio.run(app, port=PORT, host=LOCALHOST, debug=True)
if __name__ == '__main__':
    main()
| 22.325581 | 79 | 0.6625 |
7958210ca31ad18a15b14e2c2ab12e95721e51d4 | 6,931 | py | Python | tests/integration/api/v2010/account/test_outgoing_caller_id.py | BrimmingDev/twilio-python | 3226b5fed92b3c2ce64f03e6b19fc4792ef7647f | [
"MIT"
] | 1,362 | 2015-01-04T10:25:18.000Z | 2022-03-24T10:07:08.000Z | tests/integration/api/v2010/account/test_outgoing_caller_id.py | BrimmingDev/twilio-python | 3226b5fed92b3c2ce64f03e6b19fc4792ef7647f | [
"MIT"
] | 299 | 2015-01-30T09:52:39.000Z | 2022-03-31T23:03:02.000Z | tests/integration/api/v2010/account/test_outgoing_caller_id.py | BrimmingDev/twilio-python | 3226b5fed92b3c2ce64f03e6b19fc4792ef7647f | [
"MIT"
] | 622 | 2015-01-03T04:43:09.000Z | 2022-03-29T14:11:00.000Z | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class OutgoingCallerIdTestCase(IntegrationTestCase):
    """Generated integration tests for the OutgoingCallerId resource.

    The *request* tests assert the HTTP request shape against the holodeck
    mock; the *response* tests feed a canned payload back through the
    client.  This file is auto-generated (see module header) -- fix the
    generator rather than hand-editing.
    """
    def test_fetch_request(self):
        """A failing fetch still issues the expected GET request."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
        ))
    def test_fetch_response(self):
        """A 200 payload deserializes into a non-None resource."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "(415) 867-5309",
                "phone_number": "+141586753096",
                "date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
                "date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)
    def test_update_request(self):
        """A failing update still issues the expected POST request."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
        ))
    def test_update_response(self):
        """A 200 payload deserializes into a non-None resource."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
                "date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
                "friendly_name": "friendly_name",
                "phone_number": "+141586753096",
                "sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
    def test_delete_request(self):
        """A failing delete still issues the expected DELETE request."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
        ))
    def test_delete_response(self):
        """A 204 response makes delete() return True."""
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .outgoing_caller_ids("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)
    def test_list_request(self):
        """A failing list still issues the expected GET request."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .outgoing_caller_ids.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds.json',
        ))
    def test_read_full_response(self):
        """A one-item page deserializes into a non-None result."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?PageSize=50&Page=0",
                "next_page_uri": null,
                "outgoing_caller_ids": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
                        "date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
                        "friendly_name": "(415) 867-5309",
                        "phone_number": "+141586753096",
                        "sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
                    }
                ],
                "page": 0,
                "page_size": 50,
                "previous_page_uri": null,
                "start": 0,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?PageSize=50&Page=0"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .outgoing_caller_ids.list()
        self.assertIsNotNone(actual)
    def test_read_empty_response(self):
        """An empty page still deserializes into a non-None result."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?PageSize=50&Page=0",
                "next_page_uri": null,
                "outgoing_caller_ids": [],
                "page": 0,
                "page_size": 50,
                "previous_page_uri": null,
                "start": 0,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?PageSize=50&Page=0"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .outgoing_caller_ids.list()
        self.assertIsNotNone(actual)
| 40.063584 | 150 | 0.58563 |
79582143be05b9d9e10b4667d293075895257c67 | 992 | py | Python | src/olympia/landfill/tests/test_users.py | atiqueahmedziad/addons-server | 6e1cc00bf15d245fbcdddf618286bba943731e45 | [
"BSD-3-Clause"
] | 10 | 2018-08-16T04:55:06.000Z | 2022-01-08T16:09:39.000Z | src/olympia/landfill/tests/test_users.py | atiqueahmedziad/addons-server | 6e1cc00bf15d245fbcdddf618286bba943731e45 | [
"BSD-3-Clause"
] | 171 | 2018-05-20T00:27:59.000Z | 2022-03-21T13:34:27.000Z | src/olympia/landfill/tests/test_users.py | atiqueahmedziad/addons-server | 6e1cc00bf15d245fbcdddf618286bba943731e45 | [
"BSD-3-Clause"
] | 12 | 2018-08-01T16:46:09.000Z | 2022-01-08T16:09:46.000Z | # -*- coding: utf-8 -*-
from olympia import amo
from olympia.addons.models import Addon, AddonCategory, AddonUser, Category
from olympia.amo.tests import TestCase
from olympia.landfill.user import (
generate_addon_user_and_category, generate_user)
from olympia.users.models import UserProfile
class RatingsTests(TestCase):
    # NOTE(review): despite the name, these tests exercise the landfill
    # user/add-on generators, not ratings.

    def setUp(self):
        """Create the add-on fixture shared by the tests."""
        super(RatingsTests, self).setUp()
        self.email = 'nobody@mozilla.org'
        self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
    def test_generate_addon_user_and_category(self):
        """Linking user and category to the add-on creates exactly one of each."""
        user = UserProfile.objects.create(email=self.email)
        category = Category.objects.create(type=amo.ADDON_PERSONA)
        generate_addon_user_and_category(self.addon, user, category)
        assert AddonCategory.objects.all().count() == 1
        assert AddonUser.objects.all().count() == 1
    def test_generate_user(self):
        """generate_user creates a profile with the requested email."""
        generate_user(self.email)
        assert UserProfile.objects.last().email == self.email
| 36.740741 | 75 | 0.725806 |
7958218b17c8b28f178ff2fae54cb918f9578315 | 1,239 | py | Python | HealthCareAIApp/app/src/main/python/featureExtraction.py | parvathysjsu/Adaptive-and-Heuristic-AI-enabled-IoT-Edge-for-high-risk-and-rural-patients | c3e639fabfdbe9ffa4ce1f9a700ebace254c0080 | [
"MIT"
] | null | null | null | HealthCareAIApp/app/src/main/python/featureExtraction.py | parvathysjsu/Adaptive-and-Heuristic-AI-enabled-IoT-Edge-for-high-risk-and-rural-patients | c3e639fabfdbe9ffa4ce1f9a700ebace254c0080 | [
"MIT"
] | 1 | 2020-11-13T08:47:47.000Z | 2020-11-13T08:47:47.000Z | HealthCareAIApp/app/src/main/python/featureExtraction.py | parvathysjsu/Adaptive-and-Heuristic-AI-enabled-IoT-Edge-for-high-risk-and-rural-patients | c3e639fabfdbe9ffa4ce1f9a700ebace254c0080 | [
"MIT"
] | 1 | 2020-12-10T02:08:20.000Z | 2020-12-10T02:08:20.000Z | import librosa as lb
import numpy as np
from scipy.signal import butter, lfilter
# Sample rate and desired cutoff frequencies (in Hz).
# These module-level values are used by build_feat's band-pass filtering.
fs = 4000.0
lowcut = 100.0
highcut = 1800.0
def build_feat(fpath):
max_pad_len = 79
wav, rate = lb.load(fpath, sr=4000)
bb = butter_bandpass_filter(wav, lowcut, highcut, fs, order=12)
bb = bb.astype('float32')
#limit the length of samples to only 6s (6*4000)
if bb.shape[0] > 20000:
bb = bb[0:20000]
X_sample = lb.feature.mfcc(bb, sr=rate, n_fft=512, win_length=400, n_mfcc=20, hop_length = 256, n_mels = 128, fmin = 100, fmax = 1800)
pad_width = max_pad_len - X_sample.shape[1]
X_sample = np.pad(X_sample, pad_width=((0, 0), (0, pad_width)), mode='constant')
X = X_sample.T.copy()
inp = np.expand_dims(X, axis=0)
return inp
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter; return its (b, a) coefficients.

    The cutoffs are normalized by the Nyquist frequency (fs / 2) as
    required by scipy.signal.butter.
    """
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, normalized_band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply a Butterworth band-pass filter of the given order to *data*."""
    coefficients = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(*coefficients, data)
795823465432c475d8b769ed3108e7debfbfa01c | 906 | py | Python | scripts/status.py | aditprab/tesla-cl-tools | b1df7a451cdbf3f80feb10bf96ea3bc3f7904489 | [
"MIT"
] | 1 | 2019-06-25T21:44:57.000Z | 2019-06-25T21:44:57.000Z | scripts/status.py | aditprab/tesla-cl-tools | b1df7a451cdbf3f80feb10bf96ea3bc3f7904489 | [
"MIT"
] | 1 | 2019-04-05T20:43:12.000Z | 2019-04-05T20:43:12.000Z | scripts/status.py | aditprab/tesla-cl-tools | b1df7a451cdbf3f80feb10bf96ea3bc3f7904489 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests
import propertiesReader
import authTokenGenerator
import vehicles
def output(vehicleName, response):
    """Print a human-readable charge summary for the vehicle.

    *response* is the charge-state dict from the Tesla API; the keys read
    here are charging_state, battery_range and battery_level.
    """
    charging_state = response['charging_state']
    battery_range = response['battery_range']
    battery_level = response['battery_level']
    print(vehicleName + " is: " + charging_state + ".")
    print("The current charge level is: " + str(battery_range) + " miles.")
    print("Battery is " + str(battery_level) + "% full")
def getStatus():
    """Fetch the vehicle's charge state from the API and print a summary."""
    (vehicleName, vehicleId) = vehicles.getVehicles()
    authToken = authTokenGenerator.getAuthToken()
    # The URL template carries a %s placeholder for the vehicle id.
    chargeStateUrl = propertiesReader.getChargeStateUrl() % vehicleId
    request_headers = {
        'User-Agent': propertiesReader.getUserAgent(),
        'Content-Type': "application/json",
        'Authorization': "Bearer " + authToken,
    }
    payload = requests.request("GET", chargeStateUrl, headers=request_headers).json()
    output(vehicleName, payload['response'])
| 31.241379 | 87 | 0.681015 |
795823b9e9fff53f0bef62b8ad2974f5edec8b82 | 7,868 | py | Python | homeassistant/components/saj/sensor.py | serenewaffles/core | ee7116d0e85ab24548607d6b970d9915f3e3ae0b | [
"Apache-2.0"
] | 2 | 2021-01-29T02:52:01.000Z | 2021-05-15T04:23:18.000Z | homeassistant/components/saj/sensor.py | serenewaffles/core | ee7116d0e85ab24548607d6b970d9915f3e3ae0b | [
"Apache-2.0"
] | 44 | 2021-03-17T07:49:17.000Z | 2022-03-31T06:08:18.000Z | homeassistant/components/saj/sensor.py | serenewaffles/core | ee7116d0e85ab24548607d6b970d9915f3e3ae0b | [
"Apache-2.0"
] | 7 | 2021-03-20T12:34:01.000Z | 2021-12-02T10:13:52.000Z | """SAJ solar inverter interface."""
from __future__ import annotations
from datetime import date
import logging
import pysaj
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
MASS_KILOGRAMS,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
)
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
# Polling interval bounds (seconds) used by the backoff scheduler below.
MIN_INTERVAL = 5
MAX_INTERVAL = 300
INVERTER_TYPES = ["ethernet", "wifi"]
# Map the unit strings reported by pysaj onto Home Assistant unit constants.
SAJ_UNIT_MAPPINGS = {
    "": None,
    "h": TIME_HOURS,
    "kg": MASS_KILOGRAMS,
    "kWh": ENERGY_KILO_WATT_HOUR,
    "W": POWER_WATT,
    "°C": TEMP_CELSIUS,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_TYPE, default=INVERTER_TYPES[0]): vol.In(INVERTER_TYPES),
        # Username and password must be supplied together (vol.Inclusive).
        vol.Inclusive(CONF_USERNAME, "credentials"): cv.string,
        vol.Inclusive(CONF_PASSWORD, "credentials"): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the SAJ sensors."""
    remove_interval_update = None
    wifi = config[CONF_TYPE] == INVERTER_TYPES[1]
    # Init all sensors
    sensor_def = pysaj.Sensors(wifi)
    # Use all sensors by default
    hass_sensors = []
    kwargs = {}
    if wifi:
        kwargs["wifi"] = True
        # Credentials are only forwarded for wifi inverters.
        if config.get(CONF_USERNAME) and config.get(CONF_PASSWORD):
            kwargs["username"] = config[CONF_USERNAME]
            kwargs["password"] = config[CONF_PASSWORD]
    try:
        saj = pysaj.SAJ(config[CONF_HOST], **kwargs)
        done = await saj.read(sensor_def)
    except pysaj.UnauthorizedException:
        _LOGGER.error("Username and/or password is wrong")
        return
    except pysaj.UnexpectedResponseException as err:
        _LOGGER.error(
            "Error in SAJ, please check host/ip address. Original error: %s", err
        )
        return
    if not done:
        # First read failed: let Home Assistant retry the platform setup.
        raise PlatformNotReady
    for sensor in sensor_def:
        if sensor.enabled:
            hass_sensors.append(
                SAJsensor(saj.serialnumber, sensor, inverter_name=config.get(CONF_NAME))
            )
    async_add_entities(hass_sensors)
    async def async_saj():
        """Update all the SAJ sensors."""
        values = await saj.read(sensor_def)
        for sensor in hass_sensors:
            state_unknown = False
            # SAJ inverters are powered by DC via solar panels and thus are
            # offline after the sun has set. If a sensor resets on a daily
            # basis like "today_yield", this reset won't happen automatically.
            # Code below checks if today > day when sensor was last updated
            # and if so: set state to None.
            # Sensors with live values like "temperature" or "current_power"
            # will also be reset to None.
            if not values and (
                (sensor.per_day_basis and date.today() > sensor.date_updated)
                or (not sensor.per_day_basis and not sensor.per_total_basis)
            ):
                state_unknown = True
            sensor.async_update_values(unknown_state=state_unknown)
        # The truthiness of the returned values drives the backoff scheduler.
        return values
    def start_update_interval(event):
        """Start the update interval scheduling."""
        nonlocal remove_interval_update
        remove_interval_update = async_track_time_interval_backoff(hass, async_saj)
    def stop_update_interval(event):
        """Properly cancel the scheduled update."""
        remove_interval_update()  # pylint: disable=not-callable
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_update_interval)
    hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, stop_update_interval)
@callback
def async_track_time_interval_backoff(hass, action) -> CALLBACK_TYPE:
    """Add a listener that fires repetitively and increases the interval when failed.

    ``action`` is awaited on every tick: a truthy result resets the delay
    to MIN_INTERVAL, a falsy one doubles it (capped at MAX_INTERVAL).
    Returns a callable that cancels the currently scheduled call.
    """
    remove = None
    interval = MIN_INTERVAL
    async def interval_listener(now=None):
        """Handle elapsed interval with backoff."""
        nonlocal interval, remove
        try:
            if await action():
                interval = MIN_INTERVAL
            else:
                interval = min(interval * 2, MAX_INTERVAL)
        finally:
            # Always reschedule, even when the action raised.
            remove = async_call_later(hass, interval, interval_listener)
    # Kick off the first update immediately.
    hass.async_create_task(interval_listener())
    def remove_listener():
        """Remove interval listener."""
        if remove:
            remove() # pylint: disable=not-callable
    return remove_listener
class SAJsensor(SensorEntity):
    """Representation of a SAJ sensor.

    Wraps one pysaj sensor object; state is pushed by the platform's
    update loop via async_update_values (should_poll is False).
    """
    def __init__(self, serialnumber, pysaj_sensor, inverter_name=None):
        """Initialize the SAJ sensor."""
        self._sensor = pysaj_sensor
        self._inverter_name = inverter_name
        self._serialnumber = serialnumber
        self._state = self._sensor.value
        # These readings feed long-term statistics as measurements.
        if pysaj_sensor.name in ("current_power", "total_yield", "temperature"):
            self._attr_state_class = STATE_CLASS_MEASUREMENT
        if pysaj_sensor.name == "total_yield":
            # Lifetime counter: mark it as never having been reset.
            self._attr_last_reset = dt_util.utc_from_timestamp(0)
    @property
    def name(self):
        """Return the name of the sensor."""
        if self._inverter_name:
            return f"saj_{self._inverter_name}_{self._sensor.name}"
        return f"saj_{self._sensor.name}"
    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return SAJ_UNIT_MAPPINGS[self._sensor.unit]
    @property
    def device_class(self):
        """Return the device class the sensor belongs to."""
        if self.unit_of_measurement == POWER_WATT:
            return DEVICE_CLASS_POWER
        if self.unit_of_measurement == ENERGY_KILO_WATT_HOUR:
            return DEVICE_CLASS_ENERGY
        # Fahrenheit is checked against the raw pysaj unit because
        # SAJ_UNIT_MAPPINGS has no "°F" entry.
        if (
            self.unit_of_measurement == TEMP_CELSIUS
            or self._sensor.unit == TEMP_FAHRENHEIT
        ):
            return DEVICE_CLASS_TEMPERATURE
    @property
    def should_poll(self) -> bool:
        """SAJ sensors are updated & don't poll."""
        return False
    @property
    def per_day_basis(self) -> bool:
        """Return if the sensors value is on daily basis or not."""
        return self._sensor.per_day_basis
    @property
    def per_total_basis(self) -> bool:
        """Return if the sensors value is cumulative or not."""
        return self._sensor.per_total_basis
    @property
    def date_updated(self) -> date:
        """Return the date when the sensor was last updated."""
        return self._sensor.date
    @callback
    def async_update_values(self, unknown_state=False):
        """Update this sensor.

        Writes the HA state only when the value changed or the sensor
        transitioned to unknown (state set to None).
        """
        update = False
        if self._sensor.value != self._state:
            update = True
            self._state = self._sensor.value
        if unknown_state and self._state is not None:
            update = True
            self._state = None
        if update:
            self.async_write_ha_state()
    @property
    def unique_id(self):
        """Return a unique identifier for this sensor."""
        return f"{self._serialnumber}_{self._sensor.name}"
| 30.734375 | 88 | 0.656965 |
795823f819a4d0959bfc90e5e3e7504ba0b0ef4e | 19 | py | Python | payment/urls.py | lautarianoo/django_shop | 9bc575df8b7af5452bd15cc3cf4fb375be6384bd | [
"MIT"
] | null | null | null | payment/urls.py | lautarianoo/django_shop | 9bc575df8b7af5452bd15cc3cf4fb375be6384bd | [
"MIT"
] | null | null | null | payment/urls.py | lautarianoo/django_shop | 9bc575df8b7af5452bd15cc3cf4fb375be6384bd | [
"MIT"
] | null | null | null |
urlpatterns = [
] | 4.75 | 15 | 0.578947 |
7958260982a9f320b9fc8c743f8546400ae51dc4 | 7,269 | py | Python | frappe/tests/ui_test_helpers.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/tests/ui_test_helpers.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/tests/ui_test_helpers.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | import frappe
from frappe import _
from frappe.utils import add_to_date, now
@frappe.whitelist()
def create_if_not_exists(doc):
	"""Create records if they don't exist.
	Will check for uniqueness by checking if a record exists with these field value pairs
	:param doc: dict of field value pairs. can be a list of dict for multiple records.
	:return: list of names of the existing/newly created records
	"""
	# Safety guard: these helpers are fixtures for UI tests on a dev bench only.
	if not frappe.local.dev_server:
		frappe.throw(_("This method can only be accessed in development"), frappe.PermissionError)
	doc = frappe.parse_json(doc)
	# Normalise a single dict into a one-element list.
	if not isinstance(doc, list):
		docs = [doc]
	else:
		docs = doc
	names = []
	for doc in docs:
		doc = frappe._dict(doc)
		# Every field except "doctype" acts as the uniqueness filter.
		filters = doc.copy()
		filters.pop("doctype")
		name = frappe.db.exists(doc.doctype, filters)
		if not name:
			d = frappe.get_doc(doc)
			d.insert(ignore_permissions=True)
			name = d.name
		names.append(name)
	return names
@frappe.whitelist()
def create_todo_records():
	"""Insert four fixture ToDos at dates around today (idempotent)."""
	# Already seeded? The first fixture's description doubles as the marker.
	if frappe.db.get_all("ToDo", {"description": "this is first todo"}):
		return
	fixtures = [
		({"days": 7}, "this is first todo"),
		({"days": -7}, "this is second todo"),
		({"months": 2}, "this is third todo"),
		({"months": -2}, "this is fourth todo"),
	]
	for offset, description in fixtures:
		frappe.get_doc(
			{"doctype": "ToDo", "date": add_to_date(now(), **offset), "description": description}
		).insert()
@frappe.whitelist()
def create_communication_record():
	"""Insert and return one fixture Communication for form tests."""
	doc = frappe.get_doc(
		{
			"doctype": "Communication",
			"recipients": "test@gmail.com",
			"subject": "Test Form Communication 1",
			"communication_date": frappe.utils.now_datetime(),
		}
	)
	doc.insert()
	return doc
@frappe.whitelist()
def setup_workflow():
	"""Create the ToDo test workflow, seed the ToDo fixtures and clear caches."""
	# Local import: the test-workflow helper lives in the test suite.
	from frappe.workflow.doctype.workflow.test_workflow import create_todo_workflow
	create_todo_workflow()
	create_todo_records()
	frappe.clear_cache()
@frappe.whitelist()
def create_contact_phone_nos_records():
	"""Create one Contact with 1000 phone-number child rows (child-table stress fixture)."""
	# Idempotent: skip when the fixture contact already exists.
	if frappe.db.get_all("Contact", {"first_name": "Test Contact"}):
		return
	doc = frappe.new_doc("Contact")
	doc.first_name = "Test Contact"
	for index in range(1000):
		doc.append("phone_nos", {"phone": "123456{}".format(index)})
	doc.insert()
@frappe.whitelist()
def create_doctype(name, fields):
	"""Create a custom DocType with the given (JSON-encoded) field list; no-op if it exists."""
	fields = frappe.parse_json(fields)
	if frappe.db.exists("DocType", name):
		return
	frappe.get_doc(
		{
			"doctype": "DocType",
			"module": "Core",
			"custom": 1,
			"fields": fields,
			"permissions": [{"role": "System Manager", "read": 1}],
			"name": name,
		}
	).insert()
@frappe.whitelist()
def create_child_doctype(name, fields):
	"""Create a custom child-table DocType (istable=1) with the given fields; no-op if it exists."""
	fields = frappe.parse_json(fields)
	if frappe.db.exists("DocType", name):
		return
	frappe.get_doc(
		{
			"doctype": "DocType",
			"module": "Core",
			"istable": 1,
			"custom": 1,
			"fields": fields,
			"permissions": [{"role": "System Manager", "read": 1}],
			"name": name,
		}
	).insert()
@frappe.whitelist()
def create_contact_records():
	"""Seed three fixture Contacts with one phone number each (idempotent)."""
	if frappe.db.get_all("Contact", {"first_name": "Test Form Contact 1"}):
		return
	for contact_name, phone in (
		("Test Form Contact 1", "12345"),
		("Test Form Contact 2", "54321"),
		("Test Form Contact 3", "12345"),
	):
		insert_contact(contact_name, phone)
@frappe.whitelist()
def create_multiple_todo_records():
	"""Bulk-insert 1001 ToDos (names "1001".."1001001") for list-view/pagination tests."""
	# Idempotent: presence of the first description marks the fixture as seeded.
	if frappe.db.get_all("ToDo", {"description": "Multiple ToDo 1"}):
		return
	# Names are "100" + i; the set() keeps bulk_insert happy with unique rows.
	values = [("100{}".format(i), "Multiple ToDo {}".format(i)) for i in range(1, 1002)]
	frappe.db.bulk_insert("ToDo", fields=["name", "description"], values=set(values))
def insert_contact(first_name, phone_number):
	"""Insert a Contact with a single phone-number child row."""
	doc = frappe.get_doc({"doctype": "Contact", "first_name": first_name})
	doc.append("phone_nos", {"phone": phone_number})
	doc.insert()
@frappe.whitelist()
def create_form_tour():
	"""Create a four-step Form Tour over the Contact form; no-op if it already exists."""
	if frappe.db.exists("Form Tour", {"name": "Test Form Tour"}):
		return
	def get_docfield_name(filters):
		# Resolve the DocField row name that anchors a tour step to a form field.
		return frappe.db.get_value("DocField", filters, "name")
	tour = frappe.get_doc(
		{
			"doctype": "Form Tour",
			"title": "Test Form Tour",
			"reference_doctype": "Contact",
			"save_on_complete": 1,
			"steps": [
				{
					"title": "Test Title 1",
					"description": "Test Description 1",
					"has_next_condition": 1,
					"next_step_condition": "eval: doc.first_name",
					"field": get_docfield_name({"parent": "Contact", "fieldname": "first_name"}),
					"fieldname": "first_name",
					"fieldtype": "Data",
				},
				{
					"title": "Test Title 2",
					"description": "Test Description 2",
					"has_next_condition": 1,
					"next_step_condition": "eval: doc.last_name",
					"field": get_docfield_name({"parent": "Contact", "fieldname": "last_name"}),
					"fieldname": "last_name",
					"fieldtype": "Data",
				},
				{
					"title": "Test Title 3",
					"description": "Test Description 3",
					"field": get_docfield_name({"parent": "Contact", "fieldname": "phone_nos"}),
					"fieldname": "phone_nos",
					"fieldtype": "Table",
				},
				{
					# Step inside the phone_nos child table (is_table_field).
					"title": "Test Title 4",
					"description": "Test Description 4",
					"is_table_field": 1,
					"parent_field": get_docfield_name({"parent": "Contact", "fieldname": "phone_nos"}),
					"field": get_docfield_name({"parent": "Contact Phone", "fieldname": "phone"}),
					"next_step_condition": "eval: doc.phone",
					"has_next_condition": 1,
					"fieldname": "phone",
					"fieldtype": "Data",
				},
			],
		}
	)
	tour.insert()
@frappe.whitelist()
def create_data_for_discussions():
	"""Create the discussions test web page plus one topic and reply on it."""
	web_page = create_web_page()
	create_topic_and_reply(web_page)
def create_web_page():
	"""Return the discussions test Web Page, creating it on first use.

	Fix: always return a Web Page *document*. The original returned the
	result of ``frappe.db.exists`` (the bare name string) when the page
	already existed, which broke ``create_topic_and_reply``'s access to
	``web_page.name`` on repeated runs.
	"""
	page_name = frappe.db.exists("Web Page", {"route": "test-page-discussions"})
	if page_name:
		return frappe.get_doc("Web Page", page_name)
	web_page = frappe.get_doc(
		{
			"doctype": "Web Page",
			"title": "Test page for discussions",
			"route": "test-page-discussions",
			"published": True,
		}
	)
	# First save assigns the name, which the Discussions block refers to.
	web_page.save()
	web_page.append(
		"page_blocks",
		{
			"web_template": "Discussions",
			"web_template_values": frappe.as_json(
				{"title": "Discussions", "cta_title": "New Discussion", "docname": web_page.name}
			),
		},
	)
	web_page.save()
	return web_page
def create_topic_and_reply(web_page):
	"""Create one Discussion Topic and one reply on *web_page* (no-op if the topic exists)."""
	topic = frappe.db.exists(
		"Discussion Topic", {"reference_doctype": "Web Page", "reference_docname": web_page.name}
	)
	if not topic:
		topic = frappe.get_doc(
			{
				"doctype": "Discussion Topic",
				"reference_doctype": "Web Page",
				"reference_docname": web_page.name,
				"title": "Test Topic",
			}
		)
		topic.save()
		# Reply is only created together with a fresh topic.
		reply = frappe.get_doc(
			{"doctype": "Discussion Reply", "topic": topic.name, "reply": "This is a test reply"}
		)
		reply.save()
@frappe.whitelist()
def update_webform_to_multistep():
	"""Clone the standard "edit-profile" Web Form as a non-standard multi-step variant."""
	if not frappe.db.exists("Web Form", "update-profile-duplicate"):
		doc = frappe.get_doc("Web Form", "edit-profile")
		_doc = frappe.copy_doc(doc)
		_doc.is_multi_step_form = 1
		_doc.title = "update-profile-duplicate"
		_doc.route = "update-profile-duplicate"
		# Must be non-standard so it can be saved from the site, not from code.
		_doc.is_standard = False
		_doc.save()
@frappe.whitelist()
def update_child_table(name):
	"""Append a Link field to the DocType *name* if it still has only one field."""
	doc = frappe.get_doc("DocType", name)
	# Only extend once, keeping the helper idempotent across test runs.
	if len(doc.fields) == 1:
		doc.append(
			"fields",
			{
				"fieldname": "doctype_to_link",
				"fieldtype": "Link",
				"in_list_view": 1,
				"label": "Doctype to Link",
				"options": "Doctype to Link",
			},
		)
		doc.save()
| 24.640678 | 98 | 0.659651 |
795826434fb0ca20fc1bd998ab8b4e54388a5c2f | 9,886 | py | Python | src/main.py | sinbag/deepsampling | 40b28ad99f3cc4b37602e38765b62e2091642764 | [
"BSD-3-Clause"
] | 5 | 2019-10-19T06:45:48.000Z | 2021-01-26T10:53:13.000Z | src/main.py | sinbag/deepsampling | 40b28ad99f3cc4b37602e38765b62e2091642764 | [
"BSD-3-Clause"
] | null | null | null | src/main.py | sinbag/deepsampling | 40b28ad99f3cc4b37602e38765b62e2091642764 | [
"BSD-3-Clause"
] | 4 | 2019-10-16T06:52:08.000Z | 2020-08-06T18:38:23.000Z | import platform
import os, sys
if platform.system() == 'Linux':
# To find available GPU on a multi-gpu machine cluster
import utils.selectgpu as setgpu
os.environ["CUDA_VISIBLE_DEVICES"] = str(setgpu.pick_gpu_lowest_memory())
import argparse
import importlib
import numpy as np
import math
import time
import tensorflow as tf
from datetime import datetime
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, 'loss'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
sys.path.append(os.path.join(BASE_DIR, 'experiments'))
import model
import fourier
import pcf
import plotutils as plot
import ioutils as io
from mathutils import *
from telemetryutils import *
import setup
import sampler
import ioutils
import projection
import evaluation
#============================================================================
def train(env, experiment):
    """Train the sampling network defined by *experiment* and evaluate it.

    Builds the TF1 graph (input placeholder -> network -> losses), runs the
    Adam training loop, then writes evaluation artefacts (point plots or
    dither masks, spectrum/histogram figures, kernel-weight plots and
    optional realizations) into a timestamped log directory.
    """
    # Per-run log directory keyed by UTC timestamp.
    experimentID = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    logPath = io.joinPath(experiment.LOGS_DIR, experimentID)
    tf.reset_default_graph()
    sess = tf.Session()
    # create input placeholder
    inputNode = tf.placeholder(
        tf.float32,
        shape=(
            env.trainingSetup.batchSize,
            env.trainingSetup.pointCount,
            env.trainingSetup.dimCount),
        name="inputNode")
    # create network
    outputNode = model.createNetwork(env.trainingSetup, inputNode)
    # create loss(es)
    lossNode, spectrumNode, histogramNode = experiment.lossSetup(env, outputNode)
    #-------------------------------------------------
    # create optimizer
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(env.trainingSetup.learningRate, global_step, 200, 0.99, staircase=True)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    trainStep = optimizer.minimize(lossNode, global_step=global_step)
    # initialize variables
    sess.run(tf.global_variables_initializer())
    # create telemetry
    writer = tf.summary.FileWriter(logdir=logPath)
    writer.add_graph(sess.graph)
    writer.flush()
    #--------------------------
    def trainTimed(feedDict):
        # One optimizer step; returns (loss, wall-clock seconds).
        t0 = time.time()
        _, loss = sess.run((trainStep, lossNode), feedDict)
        t = round(time.time() - t0, 3)
        return loss, t
    #--------------------------
    # training loop
    if env.trainingSetup.trainIterations > 0:
        print("==== Start training...")
        for i in range(env.trainingSetup.trainIterations):
            # Fresh random (possibly gridded) point set each iteration.
            trainPoints = sampler.griddedRandom(
                env.trainingSetup.batchSize,
                env.trainingSetup.pointCount,
                env.trainingSetup.dimCount,
                env.trainingSetup.griddingDims)
            loss, t = trainTimed({inputNode:trainPoints})
            # monitor
            outputStats(writer, i, loss)
            if i % 10 == 0:
                writer.flush()
                sys.stdout.write('iter ' + str(i) + ' | loss ' + str(round(loss, 3)) + ' | time ' + str(t) + '\r')
                sys.stdout.flush()
            if i != 0 and env.trainingSetup.storeNetwork and i % env.trainingSetup.backupInterval == 0:
                evaluation.simpleCheckpoint(env, sess, logPath, i)
        print("")
        writer.flush()
    writer.close()
    #-------------------------------------------------
    # store trained network
    if env.trainingSetup.storeNetwork:
        evaluation.simpleCheckpoint(env, sess, logPath)
    #-------------------------------------------------
    # evaluation
    print("==== Evaluating...")
    testPoints = sampler.griddedRandom(
        env.trainingSetup.batchSize,
        env.trainingSetup.pointCount,
        env.trainingSetup.dimCount,
        env.trainingSetup.griddingDims)
    #-------------------------------------------------
    # output points visualization
    outPoints = sess.run(outputNode, {inputNode:testPoints})
    # scatter plots of points
    if env.trainingSetup.griddingDims == 0:
        grid = 1/math.sqrt(env.trainingSetup.pointCount) if env.trainingSetup.displayGrid else None
        plot.multiPointPlot(
            np.stack((testPoints[0], outPoints[0]), 0),
            ("input", "output"),
            grid=grid,
            filename = ioutils.joinPath(logPath, "points" + experiment.FILE_EXT))
    # dither masks (when using gridding)
    else:
        if env.trainingSetup.dimCount - env.trainingSetup.griddingDims <= 3:
            textures = plot.convertGriddedToArray(outPoints, env.trainingSetup.griddingDims)
            # 2D textures
            if env.trainingSetup.griddingDims == 2:
                for b in range(env.trainingSetup.batchSize):
                    filename = ioutils.joinPath(logPath, "dithermask_" + str(b) + ".exr")
                    ioutils.saveExr(textures[b], filename)
            # 3D textures (as 2D slices)
            elif env.trainingSetup.griddingDims == 3:
                for b in range(env.trainingSetup.batchSize):
                    for s in range(textures.shape[1]):
                        filename = ioutils.joinPath(logPath, "dithermask_b" + str(b) + "_s" + str(s) + ".exr")
                        ioutils.saveExr(textures[b, s, ...], filename)
            else:
                print("Could not save dither masks: gridding dimension > 3")
        else:
            print("Could not save dither masks: value dimensions > 3")
    #-------------------------------------------------
    # spectrum figures
    if spectrumNode is not None:
        #--------------------------
        def spectrumOutput(spectrumNode, spectrumTarget, path):
            # Average the spectrum over random inputs; 1D -> line plot, 2D -> EXR.
            expectedSpectrum = evaluation.produceExpectedOutput(
                env,
                sess,
                spectrumTarget.shape,
                inputNode,
                spectrumNode)
            if len(expectedSpectrum.shape) == 1:
                plot.multiLinePlot((spectrumTarget, expectedSpectrum),
                    title="1d spectra", legend=("target", "result"), filename=path)
            else:
                io.saveExr(expectedSpectrum, filename=path)
        #--------------------------
        spectrumNode = [spectrumNode] if not isinstance(spectrumNode, list) else spectrumNode
        for i, s in enumerate(spectrumNode):
            spectrumPath = io.joinPath(logPath, "spectra_" + str(i) + experiment.FILE_EXT)
            spectrumOutput(s, env.fourierSetupList[i].target, spectrumPath)
    #-------------------------------------------------
    # histogram figures
    if histogramNode is not None:
        #--------------------------
        def histogramOutput(histogramNode, histogramTarget, path):
            expectedHistogram = evaluation.produceExpectedOutput(
                env,
                sess,
                histogramTarget.shape,
                inputNode,
                histogramNode)
            plot.multiLinePlot((histogramTarget, expectedHistogram),
                title="histograms", legend=("target", "result"), filename=path)
        #--------------------------
        histogramNode = [histogramNode] if not isinstance(histogramNode, list) else histogramNode
        for i, h in enumerate(histogramNode):
            histogramPath = io.joinPath(logPath, "histogram" + str(i) + experiment.FILE_EXT)
            histogramOutput(h, env.histogramSetupList[i].target, histogramPath)
    #-------------------------------------------------
    # visualize trained variables
    if env.trainingSetup.storeNetwork:
        print("==== Extracting trained variables...")
        kernelWeights = evaluation.extractModelWeights(sess, env.trainingSetup)
        # plot kernels for each projection in different figure
        for i in range(env.trainingSetup.projectionCount):
            # line plots
            plot.multiLinePlot(
                kernelWeights[i:len(kernelWeights)+1:env.trainingSetup.projectionCount],
                title="kernelWeights" + env.trainingSetup.projectionsStrings[i],
                legend=None,
                filename=ioutils.joinPath(logPath, "kernelVars_" + str(i) + experiment.FILE_EXT))
            # surface plots
            if env.trainingSetup.kernelCount > 1:
                x = np.arange(env.trainingSetup.kernelSampleCount)
                y = np.arange(env.trainingSetup.kernelCount)
                x, y = np.meshgrid(x, y)
                z = np.stack(kernelWeights[i:len(kernelWeights)+1:env.trainingSetup.projectionCount])
                plot.surfacePlot(
                    [x, y, z],
                    title="kernelWeights" + env.trainingSetup.projectionsStrings[i],
                    filename=ioutils.joinPath(logPath, "kernelVars3D_" + str(i) + experiment.FILE_EXT))
    #-------------------------------------------------
    # save realizations
    if env.trainingSetup.saveEvalRealizations:
        realizationPath = ioutils.joinPath(logPath, "realizations/")
        io.makeDir(realizationPath)
        evaluation.saveRealizations(
            env,
            sess,
            inputNode,
            outputNode,
            env.trainingSetup.evalRealizations,
            realizationPath)
    sess.close()
##============================================================================
def main():
    """Parse the CLI, import the requested experiment module and train it."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-e', '--experiment',
        help='experiment to run from /experiments',
        required=True)
    cli_args = arg_parser.parse_args()
    print("==== Import", cli_args.experiment, "...")
    experiment_module = importlib.import_module(cli_args.experiment)
    environment = experiment_module.buildEnvironment()
    train(environment, experiment_module)
#============================================================================
# Script entry point: run the selected experiment, then report completion.
if __name__ == '__main__':
    main()
    print("========= TERMINATED =========")
| 35.056738 | 118 | 0.568885 |
795827515b942549cc9ea94ed3820f03dc658b16 | 3,221 | py | Python | demos/quantile/demo_synthetic.py | mathurinm/torch_itl | e3d92d753bd51ccf585029129110c93bbf9b5fd0 | [
"MIT"
] | null | null | null | demos/quantile/demo_synthetic.py | mathurinm/torch_itl | e3d92d753bd51ccf585029129110c93bbf9b5fd0 | [
"MIT"
] | null | null | null | demos/quantile/demo_synthetic.py | mathurinm/torch_itl | e3d92d753bd51ccf585029129110c93bbf9b5fd0 | [
"MIT"
] | null | null | null | import os
import sys
import importlib
if importlib.util.find_spec('torch_itl') is None:
path_to_lib = os.getcwd()[:-15]
sys.path.append(path_to_lib)
from torch_itl.estimator import IQR
from torch_itl.kernel import Gaussian, LearnableGaussian
from torch_itl.model import DecomposableIdentity
from torch_itl.sampler import LinearSampler
from torch_itl.datasets import import_data_toy_quantile
import torch
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# %%
# Defining a simple toy dataset:
print("Creating the dataset")
x_train, y_train, _ = import_data_toy_quantile(150)
# NOTE(review): n and m below are never used afterwards — presumably leftovers.
n = x_train.shape[0]
m = 10
plt.figure()
plt.scatter(x_train, y_train, marker='.')
plt.show()
# %%
# Defining an ITL model, first without a learnable kernel
print("Defining the model")
kernel_input = Gaussian(3.5)
kernel_output = Gaussian(9)
model = DecomposableIdentity(kernel_input, kernel_output, 1)
# Regularisation weights: ridge term and quantile-crossing penalty.
lbda = 0.001
lbda_cross = 0.01
sampler = LinearSampler(0.1, 0.9, 10, 0)
sampler.m = 10
est = IQR(model, lbda, lbda_cross, sampler)
#%%
# Learning the coefficients of the model
print("Fitting the coefficients of the model")
est.fit_alpha_gd(x_train, y_train, n_epochs=40,
                 lr=0.001, line_search_fn='strong_wolfe')
#%%
# Plotting the loss along learning
plt.figure()
plt.title("Loss evolution with time")
plt.plot(est.losses)
plt.show()
# Remember the fixed-kernel loss to compare against the learned kernel later.
best_loss = est.losses[-1]
# Plotting the model on test points
probs = est.sampler.sample(30)
x_test = torch.linspace(0, 1.4, 100).view(-1, 1)
y_pred = est.model.forward(x_test, probs).detach().numpy()
colors = [cm.viridis(x.item()) for x in torch.linspace(0, 1, 30)]
plt.figure()
plt.title("Conditional Quantiles output by our model")
plt.scatter(x_train, y_train, marker='.')
for i in range(30):
    plt.plot(x_test, y_pred[:, i], c=colors[i])
plt.show()
# %%
# Let's learn the input kernel with ITL
# First define a neural net
n_h = 40
d_out = 10
model_kernel_input = torch.nn.Sequential(
    torch.nn.Linear(x_train.shape[1], n_h),
    torch.nn.ReLU(),
    torch.nn.Linear(n_h, n_h),
    torch.nn.Linear(n_h, d_out),
)
gamma = 3
optim_params = dict(lr=0.01, momentum=0, dampening=0,
                    weight_decay=0, nesterov=False)
kernel_input = LearnableGaussian(gamma, model_kernel_input, optim_params)
est.model.kernel_input = kernel_input
# %%
est.fit_kernel_input(x_train, y_train)
# plot the loss along learning the kernel
#%%
plt.figure()
plt.title("Loss evolution when learning the kernel")
plt.plot(est.model.kernel_input.losses)
plt.show()
# %%
# Now retrain the parameters alpha of the model
est.fit_alpha_gd(x_train, y_train, n_epochs=40,
                 lr=0.01, line_search_fn='strong_wolfe')
# plot the loss
plt.figure()
plt.title("Loss evolution when learning model coefficients again")
plt.plot(est.losses)
plt.show()
y_pred = est.model.forward(x_test, probs).detach().numpy()
colors = [cm.viridis(x.item()) for x in torch.linspace(0, 1, 30)]
plt.figure()
plt.title('Conditional Quantiles with learned kernel')
plt.scatter(x_train, y_train, marker='.')
for i in range(30):
    plt.plot(x_test, y_pred[:, i], c=colors[i])
plt.show()
# Positive gain means the learned kernel reached a lower final loss.
print('Loss gain from learning the kernel: ',
      best_loss - est.losses[-1])
| 25.768 | 73 | 0.721515 |
7958277e4dda8e08f1e61a577845ec1330669583 | 7,850 | py | Python | docs/conf.py | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | docs/conf.py | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | docs/conf.py | maguas01/titanic | f16e13e3a88e9ef4ead1c8b47a7b4cd65811dc07 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# titanic documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'titanic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'titanicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'titanic.tex',
u'titanic Documentation',
u"Your name (or your organization/company/team)", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'titanic', u'titanic Documentation',
[u"Your name (or your organization/company/team)"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Texinfo document tuple; description string fixed ("challaenge" -> "challenge").
texinfo_documents = [
    ('index', 'titanic', u'titanic Documentation',
     u"Your name (or your organization/company/team)", 'titanic',
     'titanic data science challenge', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.040816 | 80 | 0.707261 |
795829e4d021dd2393648750b424011c797ee1e0 | 261 | py | Python | vendor/pyexcel/ext/ods.py | Lwz1966/QQ-Groups-Spider | 5f953651b2000cbfe23057e12f3a7293fe1d94fb | [
"MIT"
] | 882 | 2016-07-23T12:26:19.000Z | 2022-03-29T08:02:32.000Z | pyexcel/ext/ods.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 13 | 2016-12-12T04:29:30.000Z | 2019-08-15T10:36:16.000Z | pyexcel/ext/ods.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 410 | 2016-07-23T15:15:12.000Z | 2022-03-19T08:59:58.000Z | """
pyexcel.ext.ods
~~~~~~~~~~~~~~~~~~~~~~~~
Deprecated module import
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from ..deprecated import deprecated_pyexcel_ext
# Emit the standard deprecation warning: this module is a shim kept for
# backward compatibility (deprecated as of the version passed here).
deprecated_pyexcel_ext('0.2.2', __name__)
| 20.076923 | 51 | 0.632184 |
795829e777b58f466017324eee8d5eba23bfc74e | 1,050 | py | Python | largest_rectangle_histogram.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | largest_rectangle_histogram.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | largest_rectangle_histogram.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def largestRectangleArea(self, height: List[int]) -> int:
stack = [-1]
height.append(0)
ans = 0
for i in range(len(height)):
while height[i] < height[stack[-1]]:
h = height[stack.pop()]
w = i - stack[-1] - 1
ans = max(ans, h*w)
stack.append(i)
return ans
def largestRectangleArea_divide(self, heights: List[int]) -> int:
def divide(start:int, end:int) -> int:
if start > end:
return 0
min_idx = start
for idx in range(start, end+1):
if heights[min_idx] > heights[idx]:
min_idx = idx
return max(heights[min_idx] * (end - start + 1), max(divide(start, min_idx-1), divide(min_idx+1, end)))
return divide(0, len(heights)-1)
# Ad-hoc demo run; expected output is 16 (height 2 spanning all 8 bars).
abc = Solution()
print (abc.largestRectangleArea([6,7,5,2,4,5,9,3]))
79582c5448b8803a10eb88e8e71f7d184afc66e7 | 4,758 | py | Python | bin/gftools-fix-familymetadata.py | nyshadhr9/gftools | fe399074994a3d9cada25c37196d13707cdb53e0 | [
"Apache-2.0"
] | null | null | null | bin/gftools-fix-familymetadata.py | nyshadhr9/gftools | fe399074994a3d9cada25c37196d13707cdb53e0 | [
"Apache-2.0"
] | null | null | null | bin/gftools-fix-familymetadata.py | nyshadhr9/gftools | fe399074994a3d9cada25c37196d13707cdb53e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import tabulate
from fontTools import ttLib
from gftools.constants import (PLATFORM_ID__WINDOWS,
NAMEID_STR,
NAMEID_FONT_FAMILY_NAME,
NAMEID_FONT_SUBFAMILY_NAME,
NAMEID_FULL_FONT_NAME,
NAMEID_POSTSCRIPT_NAME,
NAMEID_TYPOGRAPHIC_FAMILY_NAME,
NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME,
NAMEID_COMPATIBLE_FULL_MACONLY)
# CLI: one or more font paths; --csv switches the report from an ASCII
# table to CSV on stdout.
parser = argparse.ArgumentParser(description=("Print out family"
                                              " metadata of the fonts"))
parser.add_argument('font', nargs="+")
parser.add_argument('--csv', default=False, action='store_true')
class FamilyMetadataTable(object):
    """Accumulates family metadata, one row per font file.

    ``headers`` grows lazily as the ``put*`` methods run; ``current_row``
    is the row being built and ``putrowToTable`` commits it to ``rows``.
    """

    def __init__(self):
        # Fix: these were previously mutable *class* attributes, so every
        # instance shared (and kept appending to) the same lists.
        self.headers = ['filename']
        self.rows = []
        self.current_row = []

    def addToHeader(self, value):
        """Append *value* to the header row if not already present."""
        if value not in self.headers:
            self.headers.append(value)

    def putnewRow(self, columnvalue=None):
        """Start a fresh row, optionally seeded with *columnvalue*."""
        self.current_row = []
        if columnvalue:
            self.current_row.append(columnvalue)

    def putrowToTable(self):
        """Commit the current row to the table."""
        self.rows.append(self.current_row)

    def binary_string(self, value):
        """Render a 16-bit value as two 8-bit binary groups,
        e.g. 0x0102 -> '00000001 00000010'."""
        return "{:#010b} {:#010b}".format(value >> 8,
                                          value & 0xFF).replace('0b', '')

    def putfsSelection(self, ttfont):
        """OS/2.fsSelection bit field, rendered as binary."""
        self.addToHeader('fsSelection')
        self.current_row.append(self.binary_string(ttfont['OS/2'].fsSelection))

    def putmacStyle(self, ttfont):
        """head.macStyle bit field, rendered as binary."""
        self.addToHeader('macStyle')
        self.current_row.append(self.binary_string(ttfont['head'].macStyle))

    def putnameIds(self, ttfont, platform=PLATFORM_ID__WINDOWS):
        """Family-naming name-table entries for *platform* (empty string
        when an entry is missing)."""
        for nameid in [NAMEID_FONT_FAMILY_NAME,
                       NAMEID_FONT_SUBFAMILY_NAME,
                       NAMEID_FULL_FONT_NAME,
                       NAMEID_POSTSCRIPT_NAME,
                       NAMEID_TYPOGRAPHIC_FAMILY_NAME,
                       NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME,
                       NAMEID_COMPATIBLE_FULL_MACONLY]:
            value = ''
            for name in ttfont['name'].names:
                if nameid == name.nameID and platform == name.platformID:
                    value = name.string.decode(name.getEncoding()) or ''
                    break
            self.addToHeader('{}:{}'.format(nameid, NAMEID_STR[nameid]))
            self.current_row.append(value)

    def putitalicAngle(self, ttfont):
        """post.italicAngle."""
        self.addToHeader('italicAngle')
        self.current_row.append(ttfont['post'].italicAngle)

    def putwidthClass(self, ttfont):
        """OS/2.usWidthClass."""
        self.addToHeader('usWidthClass')
        self.current_row.append(ttfont['OS/2'].usWidthClass)

    def putweightClass(self, ttfont):
        """OS/2.usWeightClass."""
        self.addToHeader('usWeightClass')
        self.current_row.append(ttfont['OS/2'].usWeightClass)

    def putPanose(self, ttfont):
        """All OS/2 panose fields, in sorted attribute order."""
        # The original enumerated this loop but never used the index.
        for k in sorted(ttfont['OS/2'].panose.__dict__.keys()):
            self.addToHeader(k)
            self.current_row.append(getattr(ttfont['OS/2'].panose, k, 0))

    def putfixedPitch(self, ttfont):
        """post.isFixedPitch."""
        self.addToHeader('isFixedPitch')
        self.current_row.append(ttfont['post'].isFixedPitch)
if __name__ == '__main__':
    options = parser.parse_args()
    # (Removed an unused top-level `rows = []` and an unused enumerate
    # index from the font loop.)
    fm = FamilyMetadataTable()
    for font in options.font:
        ttfont = ttLib.TTFont(font)
        fm.putnewRow(os.path.basename(font))
        fm.putnameIds(ttfont)
        fm.putmacStyle(ttfont)
        fm.putitalicAngle(ttfont)
        fm.putfsSelection(ttfont)
        fm.putweightClass(ttfont)
        fm.putwidthClass(ttfont)
        fm.putfixedPitch(ttfont)
        fm.putPanose(ttfont)
        fm.putrowToTable()

    def as_csv(rows):
        """Write headers + rows to stdout as CSV, then exit(0)."""
        import csv
        import sys
        writer = csv.writer(sys.stdout)
        writer.writerows([fm.headers])
        writer.writerows(rows)
        sys.exit(0)

    if options.csv:
        as_csv(fm.rows)

    # Default output: an aligned ASCII table.
    print(tabulate.tabulate(fm.rows, fm.headers))
| 35.507463 | 79 | 0.617486 |
79582cfdc79be10280a5fc9d3bce55e908a17fec | 1,192 | py | Python | kubernetes_asyncio/test/test_extensions_v1beta1_run_as_group_strategy_options.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_extensions_v1beta1_run_as_group_strategy_options.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_extensions_v1beta1_run_as_group_strategy_options.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.extensions_v1beta1_run_as_group_strategy_options import ExtensionsV1beta1RunAsGroupStrategyOptions # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestExtensionsV1beta1RunAsGroupStrategyOptions(unittest.TestCase):
    """ExtensionsV1beta1RunAsGroupStrategyOptions unit test stubs"""

    def setUp(self):
        """No fixtures required for this generated stub."""

    def tearDown(self):
        """Nothing to clean up."""

    def testExtensionsV1beta1RunAsGroupStrategyOptions(self):
        """Test ExtensionsV1beta1RunAsGroupStrategyOptions"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.extensions_v1beta1_run_as_group_strategy_options.ExtensionsV1beta1RunAsGroupStrategyOptions()  # noqa: E501
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| 29.8 | 158 | 0.777685 |
79582f85d124ef65fbc3e24676f1082e91768c3c | 10,611 | py | Python | jinja2schema/visitors/stmt.py | jlevesque/jinja2schema | 5340f43d18dc2f9fcd7b6b18a5bdd83b3410a222 | [
"BSD-3-Clause"
] | null | null | null | jinja2schema/visitors/stmt.py | jlevesque/jinja2schema | 5340f43d18dc2f9fcd7b6b18a5bdd83b3410a222 | [
"BSD-3-Clause"
] | null | null | null | jinja2schema/visitors/stmt.py | jlevesque/jinja2schema | 5340f43d18dc2f9fcd7b6b18a5bdd83b3410a222 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
jinja2schema.visitors.stmt
~~~~~~~~~~~~~~~~~~~~~~~~~~
Statement is an instance of :class:`jinja2.nodes.Stmt`.
Statement visitors return :class:`.models.Dictionary` of structures of variables used within the statement.
"""
import functools
from jinja2 import nodes, Environment, PackageLoader
from jinja2schema.config import default_config
from ..model import Scalar, Dictionary, List, Unknown, Tuple, Boolean
from ..macro import Macro
from ..mergers import merge, merge_many
from ..exceptions import InvalidExpression
from .._compat import iteritems, izip, zip_longest
from .expr import Context, visit_expr
from .util import visit_many
# Registry of statement visitors: maps a jinja2 node class to the visitor
# function registered for it via the @visits_stmt decorator.
stmt_visitors = {}
def visits_stmt(node_cls):
    """Decorator that registers a function as a visitor for ``node_cls``.

    :param node_cls: subclass of :class:`jinja2.nodes.Stmt`
    """
    def decorator(func):
        # Register the *undecorated* function; visit_stmt dispatches on
        # these entries and calls them with 3 positional args.
        stmt_visitors[node_cls] = func
        @functools.wraps(func)
        def wrapped_func(ast, macroses=None, config=default_config, child_blocks=None):
            # The wrapper enforces the node type and forwards all 4 args.
            assert isinstance(ast, node_cls)
            return func(ast, macroses, config, child_blocks)
        return wrapped_func
    return decorator
def visit_stmt(ast, macroses=None, config=default_config, child_blocks=None):
    """Return the structure of ``ast``.

    Dispatches on the registered visitor for ``type(ast)``; when there is
    no exact match, falls back to an ``isinstance`` scan (keeping the
    *last* matching entry, as the original did).

    :param ast: instance of :class:`jinja2.nodes.Stmt`
    :returns: :class:`.model.Dictionary`
    """
    visitor = stmt_visitors.get(type(ast))
    if visitor is None:
        for node_cls, candidate in iteritems(stmt_visitors):
            if isinstance(ast, node_cls):
                visitor = candidate
    if visitor is None:
        raise Exception('stmt visitor for {0} is not found'.format(type(ast)))
    return visitor(ast, macroses, config)
def visit_test_node(ast, macroses=None, config=default_config):
    """Visit the ``test`` expression of a conditional node.

    Predicts a Boolean structure when BOOLEAN_CONDITIONS is enabled,
    otherwise Unknown; returns the (rtype, struct) pair from visit_expr.
    """
    struct_cls = Boolean if config.BOOLEAN_CONDITIONS else Unknown
    predicted = struct_cls.from_ast(ast.test,
                                    order_nr=config.ORDER_OBJECT.get_next())
    return visit_expr(ast.test, Context(predicted_struct=predicted),
                      macroses, config)
@visits_stmt(nodes.For)
def visit_for(ast, macroses=None, config=default_config, child_blocks=None):
    """Infer the structure of a ``{% for %}`` loop.

    The body and ``{% else %}`` branch are visited inside sub-counters so
    their variables get locally scoped ordering numbers; the iterated
    expression is then constrained to be a list of the loop target's
    structure.
    """
    with config.ORDER_OBJECT.sub_counter():
        body_struct = visit_many(ast.body, macroses, config, predicted_struct_cls=Scalar)
    with config.ORDER_OBJECT.sub_counter():
        else_struct = visit_many(ast.else_, macroses, config, predicted_struct_cls=Scalar)
    if 'loop' in body_struct:
        # exclude a special `loop` variable from the body structure
        del body_struct['loop']

    if isinstance(ast.target, nodes.Tuple):
        # Unpacking form, {% for a, b in seq %}: each target name becomes
        # a tuple item; names unused in the body default to Unknown.
        target_struct = Tuple.from_ast(
            ast.target,
            [body_struct.pop(item.name, Unknown.from_ast(ast.target, order_nr=config.ORDER_OBJECT.get_next()))
             for item in ast.target.items], order_nr=config.ORDER_OBJECT.get_next())
    else:
        target_struct = body_struct.pop(ast.target.name, Unknown.from_ast(ast, order_nr=config.ORDER_OBJECT.get_next()))

    # The iterated expression must be a list of the target's structure.
    iter_rtype, iter_struct = visit_expr(
        ast.iter,
        Context(
            return_struct_cls=Unknown,
            predicted_struct=List.from_ast(ast, target_struct, order_nr=config.ORDER_OBJECT.get_next())),
        macroses, config)

    iter_test_struct = Dictionary()
    if ast.test:
        # {% for x in seq if cond %}: the condition further constrains
        # the structure of the iterated items.
        _, test_struct = visit_test_node(ast, macroses, config)
        test_struct = test_struct.pop(ast.target.name, Unknown.from_ast(ast, order_nr=config.ORDER_OBJECT.get_next()))
        _, iter_test_struct = visit_expr(
            ast.iter,
            Context(
                return_struct_cls=Unknown,
                predicted_struct=List.from_ast(ast, test_struct, order_nr=config.ORDER_OBJECT.get_next())),
            macroses, config)

    merge(iter_rtype, List(target_struct))
    return merge_many(iter_struct, body_struct, else_struct, iter_test_struct)
@visits_stmt(nodes.If)
def visit_if(ast, macroses=None, config=default_config, child_blocks=None):
    """Infer the structure of an ``{% if %}`` statement.

    Merges the test expression and both branches, then propagates
    "is defined" / "is undefined" checks from the test onto the merged
    variables.
    """
    test_rtype, test_struct = visit_test_node(ast, macroses, config)
    if_struct = visit_many(ast.body, macroses, config, predicted_struct_cls=Scalar)
    else_struct = visit_many(ast.else_, macroses, config, predicted_struct_cls=Scalar) if ast.else_ else Dictionary()
    struct = merge_many(test_struct, if_struct, else_struct)

    for var_name, var_struct in iteritems(test_struct):
        if var_struct.checked_as_defined or var_struct.checked_as_undefined:
            # lookup_struct is the branch that runs when the variable is
            # *absent*: the if-branch for an "is undefined" check, the
            # else-branch for an "is defined" check.  (Exactly one of the
            # two conditions below holds here.)
            if var_struct.checked_as_undefined:
                lookup_struct = if_struct
            elif var_struct.checked_as_defined:
                lookup_struct = else_struct

            # The variable "may be defined" when that branch assigns it a
            # constant; the checked_as_* flags survive only if the branch
            # does not introduce a non-constant binding for it.
            struct[var_name].may_be_defined = (lookup_struct and
                                               var_name in lookup_struct and
                                               lookup_struct[var_name].constant)
            struct[var_name].checked_as_defined = test_struct[var_name].checked_as_defined and (
                not lookup_struct or not var_name in lookup_struct or lookup_struct[var_name].constant
            )
            struct[var_name].checked_as_undefined = test_struct[var_name].checked_as_undefined and (
                not lookup_struct or not var_name in lookup_struct or lookup_struct[var_name].constant
            )
    return struct
@visits_stmt(nodes.Assign)
def visit_assign(ast, macroses=None, config=default_config, child_blocks=None):
    """Infer structures introduced by a ``{% set %}`` assignment.

    Handles three shapes: a single name target; a tuple target assigned
    from a tuple literal (paired item by item); and a tuple target
    assigned from one expression expected to yield a tuple.
    """
    struct = Dictionary()
    if (isinstance(ast.target, nodes.Name) or
            (isinstance(ast.target, nodes.Tuple) and isinstance(ast.node, nodes.Tuple))):
        variables = []
        if not (isinstance(ast.target, nodes.Tuple) and isinstance(ast.node, nodes.Tuple)):
            # {% set x = expr %}
            variables.append((ast.target.name, ast.node))
        else:
            # {% set a, b = expr1, expr2 %} -- pair names with expressions
            if len(ast.target.items) != len(ast.node.items):
                raise InvalidExpression(ast, 'number of items in left side is different '
                                             'from right side')
            for name_ast, var_ast in izip(ast.target.items, ast.node.items):
                variables.append((name_ast.name, var_ast))
        for var_name, var_ast in variables:
            var_rtype, var_struct = visit_expr(var_ast, Context(
                predicted_struct=Unknown.from_ast(var_ast, order_nr=config.ORDER_OBJECT.get_next())), macroses, config)
            # Assigned names are constants within the template.
            var_rtype.constant = True
            var_rtype.label = var_name
            struct = merge_many(struct, var_struct, Dictionary({
                var_name: var_rtype,
            }))
        return struct
    elif isinstance(ast.target, nodes.Tuple):
        # {% set a, b = expr %} -- the right side is a single expression
        # whose result must unpack into the target tuple.
        tuple_items = []
        for name_ast in ast.target.items:
            var_struct = Unknown.from_ast(name_ast, constant=True, order_nr=config.ORDER_OBJECT.get_next())
            tuple_items.append(var_struct)
            struct = merge(struct, Dictionary({name_ast.name: var_struct}))
        var_rtype, var_struct = visit_expr(
            ast.node, Context(return_struct_cls=Unknown, predicted_struct=Tuple(tuple_items)), macroses, config)
        return merge(struct, var_struct)
    else:
        raise InvalidExpression(ast, 'unsupported assignment')
@visits_stmt(nodes.Output)
def visit_output(ast, macroses=None, config=default_config, child_blocks=None):
    """Visit template output nodes ({{ ... }} expressions and literal
    text); each output expression is predicted to be a Scalar."""
    return visit_many(ast.nodes, macroses, config, predicted_struct_cls=Scalar)
@visits_stmt(nodes.Macro)
def visit_macro(ast, macroses=None, config=default_config, child_blocks=None):
    """Register a ``{% macro %}`` definition; return its body structure.

    Parameter types are inferred from how the names are used in the body,
    merged with default-value types; the resulting Macro is stored in
    *macroses* and the parameters are stripped from the returned struct
    (they are bound locally, not free template variables).
    """
    # XXX the code needs to be refactored
    args = []
    kwargs = []
    body_struct = visit_many(ast.body, macroses, config, predicted_struct_cls=Scalar)

    # Walk the parameters right-to-left so that defaults (which apply to
    # the trailing parameters) line up via zip_longest; `i` counts from
    # the end and is only used to label positional arguments.
    for i, (arg, default_value_ast) in enumerate(reversed(list(zip_longest(reversed(ast.args),
                                                                           reversed(ast.defaults)))), start=1):
        has_default_value = bool(default_value_ast)
        if has_default_value:
            default_rtype, default_struct = visit_expr(
                default_value_ast, Context(predicted_struct=Unknown()), macroses, config)
        else:
            default_rtype = Unknown(linenos=[arg.lineno])
        default_rtype.constant = False
        default_rtype.label = 'argument "{0}"'.format(arg.name) if has_default_value else 'argument #{0}'.format(i)
        if arg.name in body_struct:
            default_rtype = merge(default_rtype, body_struct[arg.name])  # just to make sure
        default_rtype.linenos = [ast.lineno]
        if has_default_value:
            kwargs.append((arg.name, default_rtype))
        else:
            args.append((arg.name, default_rtype))
    macroses[ast.name] = Macro(ast.name, args, kwargs)

    # Collect all parameter names (positional and keyword) ...
    tmp = dict(args)
    tmp.update(dict(kwargs))
    args_struct = Dictionary(tmp)
    for arg_name, arg_type in args:
        args_struct[arg_name] = arg_type

    # ... and drop them from the exposed structure: they are macro-local.
    for arg in args_struct.iterkeys():
        body_struct.pop(arg, None)
    return body_struct
@visits_stmt(nodes.Block)
def visit_block(ast, macroses=None, config=default_config, child_blocks=None):
    """Return the structure of a ``{% block %}`` body.

    ``child_blocks`` is accepted (and ignored) for signature consistency:
    the ``visits_stmt`` wrapper invokes every visitor with four positional
    arguments, so omitting the parameter raised TypeError on that path.
    """
    return visit_many(ast.body, macroses, config)
@visits_stmt(nodes.Include)
def visit_include(ast, macroses=None, config=default_config, child_blocks=None):
    """Visit an ``{% include %}``: parse the included template and merge
    the structures of its body."""
    included = get_inherited_template(config, ast)
    return visit_many(included.body, macroses, config)
@visits_stmt(nodes.Extends)
def visit_extends(ast, macroses=None, config=default_config, child_blocks=None):
    """Visit an ``{% extends %}``: load the parent template and, when the
    child overrides blocks, visit the merged node list instead."""
    parent = get_inherited_template(config, ast)
    if child_blocks:
        merged = get_correct_nodes(child_blocks, parent.body)
        return visit_many(merged, None, config)
    return visit_many(parent.body, macroses, config)
def get_inherited_template(config, ast):
    """Load and parse the template referenced by ``ast.template``,
    resolving it from the package/dir named in *config*."""
    env = Environment(loader=PackageLoader(config.PACKAGE_NAME, config.TEMPLATE_DIR))
    source = env.loader.get_source(env, ast.template.value)[0]
    return env.parse(source)
def separate_template_blocks(template, blocks, template_nodes):
    """Partition *template*'s children in place: Block nodes are appended
    to *blocks*, everything else to *template_nodes*.  Returns both."""
    for child in template:
        target = blocks if isinstance(child, nodes.Block) else template_nodes
        target.append(child)
    return blocks, template_nodes
def get_correct_nodes(child_blocks, template):
    """Merge child-template blocks with the parent template's nodes.

    Blocks the child overrides replace the parent's blocks of the same
    name; the parent's non-block nodes are appended after all blocks.

    Fix: the local previously named ``nodes`` shadowed the imported
    ``jinja2.nodes`` module; renamed to ``other_nodes``.
    """
    parent_blocks, other_nodes = separate_template_blocks(template, [], [])
    child_block_names = [c.name for c in child_blocks]
    # Child blocks first, then parent blocks that were not overridden.
    blocks = child_blocks + [b for b in parent_blocks
                             if b.name not in child_block_names]
    return blocks + other_nodes
| 41.127907 | 120 | 0.684478 |
79582fc6d2dbb2ccd8327fea8b1cad66aa8cd54a | 15,953 | py | Python | test/utils.py | stephan-hof/mongo-python-driver | a30eb87db3a0db4a9bd0ff9472f30b8215100f23 | [
"Apache-2.0"
] | null | null | null | test/utils.py | stephan-hof/mongo-python-driver | a30eb87db3a0db4a9bd0ff9472f30b8215100f23 | [
"Apache-2.0"
] | null | null | null | test/utils.py | stephan-hof/mongo-python-driver | a30eb87db3a0db4a9bd0ff9472f30b8215100f23 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import contextlib
import functools
import os
import struct
import sys
import threading
import time
import warnings
from collections import defaultdict
from functools import partial
from pymongo import MongoClient, monitoring
from pymongo.errors import AutoReconnect, OperationFailure
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.write_concern import WriteConcern
from test import (client_context,
db_user,
db_pwd)
# A write concern no real deployment can satisfy (w=1000 members); used to
# deliberately provoke write-concern errors in tests.
IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000)
class WhiteListEventListener(monitoring.CommandListener):
    """Records command monitoring events, but only for a whitelisted set
    of command names passed to the constructor."""

    def __init__(self, *commands):
        self.commands = set(commands)
        self.results = defaultdict(list)

    def _record(self, phase, event):
        # Ignore events for commands outside the whitelist.
        if event.command_name in self.commands:
            self.results[phase].append(event)

    def started(self, event):
        self._record('started', event)

    def succeeded(self, event):
        self._record('succeeded', event)

    def failed(self, event):
        self._record('failed', event)
class EventListener(monitoring.CommandListener):
    """Records every command monitoring event, bucketed by phase."""

    def __init__(self):
        self.results = defaultdict(list)

    def _record(self, phase, event):
        self.results[phase].append(event)

    def started(self, event):
        self._record('started', event)

    def succeeded(self, event):
        self._record('succeeded', event)

    def failed(self, event):
        self._record('failed', event)
class ServerAndTopologyEventListener(monitoring.ServerListener,
                                     monitoring.TopologyListener):
    """Listens to all events."""

    def __init__(self):
        self.results = []

    def _record(self, event):
        # Keep every server/topology event in arrival order.
        self.results.append(event)

    # All three callbacks simply record the event.
    opened = _record
    description_changed = _record
    closed = _record
class HeartbeatEventListener(monitoring.ServerHeartbeatListener):
    """Listens to only server heartbeat events."""

    def __init__(self):
        self.results = []

    def _record(self, event):
        # Phases are not distinguished; keep arrival order.
        self.results.append(event)

    started = _record
    succeeded = _record
    failed = _record
def _connection_string(h, p, authenticate):
if h.startswith("mongodb://"):
return h
elif client_context.auth_enabled and authenticate:
return "mongodb://%s:%s@%s:%d" % (db_user, db_pwd, str(h), p)
else:
return "mongodb://%s:%d" % (str(h), p)
def _mongo_client(host, port, authenticate=True, direct=False, **kwargs):
    """Create a new client over SSL/TLS if necessary."""
    # Start from the test context's SSL options so every client matches
    # the server's TLS configuration; extra kwargs can override anything.
    client_options = client_context.ssl_client_options.copy()
    # Target the replica set unless a direct (single-server) connection
    # was requested.
    if client_context.replica_set_name and not direct:
        client_options['replicaSet'] = client_context.replica_set_name
    client_options.update(kwargs)
    client = MongoClient(_connection_string(host, port, authenticate), port,
                         **client_options)
    return client
# Convenience constructors over _mongo_client: "single" forces a direct
# (one-server) connection, "rs" targets the replica set, "rs_or_single"
# works for either topology; *_noauth variants never embed credentials.

def single_client_noauth(
        h=client_context.host, p=client_context.port, **kwargs):
    """Make a direct connection. Don't authenticate."""
    return _mongo_client(h, p, authenticate=False, direct=True, **kwargs)

def single_client(
        h=client_context.host, p=client_context.port, **kwargs):
    """Make a direct connection, and authenticate if necessary."""
    return _mongo_client(h, p, direct=True, **kwargs)

def rs_client_noauth(
        h=client_context.host, p=client_context.port, **kwargs):
    """Connect to the replica set. Don't authenticate."""
    return _mongo_client(h, p, authenticate=False, **kwargs)

def rs_client(
        h=client_context.host, p=client_context.port, **kwargs):
    """Connect to the replica set and authenticate if necessary."""
    return _mongo_client(h, p, **kwargs)

def rs_or_single_client_noauth(
        h=client_context.host, p=client_context.port, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Like rs_or_single_client, but does not authenticate.
    """
    return _mongo_client(h, p, authenticate=False, **kwargs)

def rs_or_single_client(
        h=client_context.host, p=client_context.port, **kwargs):
    """Connect to the replica set if there is one, otherwise the standalone.

    Authenticates if necessary.
    """
    return _mongo_client(h, p, **kwargs)
def one(s):
    """Get one element of a set (any element; raises StopIteration when
    *s* is empty, matching next(iter(s)))."""
    iterator = iter(s)
    return next(iterator)
def oid_generated_on_client(oid):
    """Is this process's PID in this ObjectId?

    Bytes 7-8 of the ObjectId's binary form hold the generating
    process's PID modulo 0xFFFF (legacy ObjectId layout).
    """
    (embedded_pid,) = struct.unpack(">H", oid.binary[7:9])
    return embedded_pid == os.getpid() % 0xFFFF
def delay(sec):
    """Return a JavaScript predicate that sleeps *sec* seconds
    server-side, then returns true (for $where-style stalls)."""
    return 'function() { sleep(%f * 1000); return true; }' % sec
def get_command_line(client):
    """Return the server's getCmdLineOpts document (must report ok == 1)."""
    opts = client.admin.command('getCmdLineOpts')
    assert opts['ok'] == 1, "getCmdLineOpts() failed"
    return opts


def server_started_with_option(client, cmdline_opt, config_opt):
    """Check if the server was started with a particular option.

    :Parameters:
      - `cmdline_opt`: The command line option (i.e. --nojournal)
      - `config_opt`: The config file option (i.e. nojournal)
    """
    command_line = get_command_line(client)
    parsed = command_line.get('parsed', {})
    if config_opt in parsed:
        return parsed[config_opt]
    return cmdline_opt in command_line['argv']


def server_started_with_auth(client):
    """True when the server requires authentication."""
    try:
        command_line = get_command_line(client)
    except OperationFailure as exc:
        msg = exc.details.get('errmsg', '')
        if exc.code == 13 or 'unauthorized' in msg or 'login' in msg:
            # Unauthorized to run getCmdLineOpts: auth must be enabled.
            return True
        raise
    # MongoDB >= 2.0 reports parsed options.
    if 'parsed' in command_line:
        parsed = command_line['parsed']
        # MongoDB >= 2.6 nests them under "security".
        if 'security' in parsed:
            security = parsed['security']
            if 'authorization' in security:  # >= rc3
                return security['authorization'] == 'enabled'
            # < rc3
            return security.get('auth', False) or bool(security.get('keyFile'))
        return parsed.get('auth', False) or bool(parsed.get('keyFile'))
    # Legacy servers: inspect raw argv.
    argv = command_line['argv']
    return '--auth' in argv or '--keyFile' in argv


def server_started_with_nojournal(client):
    """True if journaling was disabled at server startup."""
    command_line = get_command_line(client)
    # MongoDB >= 2.6 exposes this under storage.journal.enabled.
    journal = command_line.get('parsed', {}).get('storage', {}).get('journal')
    if journal is not None:
        return not journal['enabled']
    return server_started_with_option(client, '--nojournal', 'nojournal')


def server_is_master_with_slave(client):
    """True if the server runs in legacy master mode."""
    opts = get_command_line(client)
    parsed = opts.get('parsed')
    if parsed is not None:
        return parsed.get('master', False)
    return '--master' in opts['argv']
def drop_collections(db):
    """Drop every non-system collection in *db*."""
    for name in db.collection_names():
        if name.startswith('system'):
            continue
        db.drop_collection(name)
def remove_all_users(db):
    """Drop all users from *db*, acknowledged per the test context's
    write-concern ``w`` value."""
    db.command("dropAllUsersFromDatabase", 1,
               writeConcern={"w": client_context.w})
def joinall(threads):
    """Join threads with a 5-minute timeout, assert joins succeeded.

    Fix: uses ``Thread.is_alive()`` -- the camelCase ``isAlive`` alias
    was removed in Python 3.9.
    """
    for t in threads:
        t.join(300)
        assert not t.is_alive(), "Thread %s hung" % t
def connected(client):
    """Force a (possibly newly-constructed) *client* to connect, then
    return it."""
    with warnings.catch_warnings():
        # "ismaster" is always routed to the primary even if the client's
        # read preference isn't PRIMARY; suppress that UserWarning.
        warnings.simplefilter("ignore", UserWarning)
        client.admin.command('ismaster')  # Establishes the connection.
    return client
def wait_until(predicate, success_description, timeout=10):
    """Poll *predicate* every 100ms until it is truthy or *timeout*
    seconds (default 10) have elapsed.

    Returns the predicate's first truthy value; on timeout raises
    AssertionError("Didn't ever <success_description>").  E.g.:

       wait_until(lambda: client.primary == ('a', 1),
                  'connect to the primary')
    """
    deadline = time.time() + timeout
    while True:
        value = predicate()
        if value:
            return value
        if time.time() > deadline:
            raise AssertionError("Didn't ever %s" % success_description)
        time.sleep(0.1)
def is_mongos(client):
    """True when *client* is connected to a mongos router (which reports
    msg == 'isdbgrid' in its ismaster response)."""
    ismaster = client.admin.command('ismaster')
    return ismaster.get('msg', '') == 'isdbgrid'
def assertRaisesExactly(cls, fn, *args, **kwargs):
    """Assert fn(*args, **kwargs) raises exactly *cls*, not a subclass.

    Unlike the standard assertRaises, a subclass fails the check: e.g.
    MongoClient() raising AutoReconnect fails an assertion for its base
    class ConnectionFailure.
    """
    try:
        fn(*args, **kwargs)
    except Exception as exc:
        assert type(exc) is cls, "got %s, expected %s" % (
            type(exc).__name__, cls.__name__)
    else:
        raise AssertionError("%s not raised" % cls)
@contextlib.contextmanager
def _ignore_deprecations():
    """Context manager that silences DeprecationWarning."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        yield


def ignore_deprecations(wrapped=None):
    """Silence DeprecationWarning; usable as a decorator (with a function
    argument) or as a context manager (called with none)."""
    if wrapped is None:
        return _ignore_deprecations()

    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        with _ignore_deprecations():
            return wrapped(*args, **kwargs)
    return wrapper
class DeprecationFilter(object):
    """Apply a warnings filter action to DeprecationWarning until
    ``stop()`` restores the previous filters."""

    def __init__(self, action="ignore"):
        """Start filtering deprecations with *action* ('ignore', 'error', ...)."""
        self.warn_context = warnings.catch_warnings()
        self.warn_context.__enter__()
        warnings.simplefilter(action, DeprecationWarning)

    def stop(self):
        """Stop filtering deprecations and restore the old filters."""
        ctx, self.warn_context = self.warn_context, None
        ctx.__exit__()
def read_from_which_host(
        client,
        pref,
        tag_sets=None,
):
    """Read from a client with the given Read Preference.

    Return the 'host:port' which was read from, or None on AutoReconnect.

    :Parameters:
      - `client`: A MongoClient
      - `pref`: A ReadPreference instance
      - `tag_sets`: List of dicts of tags for data-center-aware reads
    """
    db = client.pymongo_test
    # A single dict of tags is accepted and normalized to a one-item list.
    if isinstance(tag_sets, dict):
        tag_sets = [tag_sets]
    if tag_sets:
        tags = tag_sets or pref.tag_sets
        # Rebuild the preference with the requested tag sets.
        pref = pref.__class__(tags)

    db.read_preference = pref

    cursor = db.test.find()
    try:
        try:
            next(cursor)
        except StopIteration:
            # No documents in collection, that's fine
            pass

        # The server the cursor actually talked to.
        return cursor.address
    except AutoReconnect:
        return None
def assertReadFrom(testcase, client, member, *args, **kwargs):
    """Check that a query with the given mode and tag_sets reads from
    the expected replica-set member (checked 10 times).

    :Parameters:
      - `testcase`: A unittest.TestCase
      - `client`: A MongoClient
      - `member`: A host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware reads
    """
    for _ in range(10):
        used = read_from_which_host(client, *args, **kwargs)
        testcase.assertEqual(member, used)
def assertReadFromAll(testcase, client, members, *args, **kwargs):
    """Check that over 100 queries the reads hit every member in
    *members*, and only members in that set.

    :Parameters:
      - `testcase`: A unittest.TestCase
      - `client`: A MongoClient
      - `members`: Sequence of host:port expected to be used
      - `mode`: A ReadPreference
      - `tag_sets` (optional): List of dicts of tags for data-center-aware reads
    """
    expected = set(members)
    seen = set(read_from_which_host(client, *args, **kwargs)
               for _ in range(100))
    testcase.assertEqual(expected, seen)
def get_pool(client):
    """Get the standalone, primary, or mongos pool."""
    topology = client._get_topology()
    return topology.select_server(writable_server_selector).pool
def get_pools(client):
    """Get all pools, one per known server."""
    servers = client._get_topology().select_servers(any_server_selector)
    return [server.pool for server in servers]
# Constants for run_threads and lazy_client_trial.
NTRIALS = 5  # independent trials per lazy-client test
NTHREADS = 10  # worker threads spawned per trial
def run_threads(collection, target):
    """Run a target function in NTHREADS threads.

    *target* is a callable taking (collection, thread_index).

    Fix: uses ``Thread.is_alive()`` -- the camelCase ``isAlive`` alias
    was removed in Python 3.9.
    """
    threads = [
        threading.Thread(target=partial(target, collection, i))
        for i in range(NTHREADS)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join(30)
        assert not t.is_alive()
@contextlib.contextmanager
def frequent_thread_switches():
    """Make concurrency bugs more likely to manifest by forcing very
    frequent thread switches, restoring the old interval on exit.

    No-op on Jython; uses setswitchinterval (Py3) or setcheckinterval
    (Py2) otherwise.
    """
    on_java = sys.platform.startswith('java')
    saved = None
    if not on_java:
        if hasattr(sys, 'getswitchinterval'):
            saved = sys.getswitchinterval()
            sys.setswitchinterval(1e-6)
        else:
            saved = sys.getcheckinterval()
            sys.setcheckinterval(1)
    try:
        yield
    finally:
        if not on_java:
            if hasattr(sys, 'setswitchinterval'):
                sys.setswitchinterval(saved)
            else:
                sys.setcheckinterval(saved)
def lazy_client_trial(reset, target, test, get_client):
    """Test concurrent operations on a lazily-connecting client.

    `reset` takes a collection and resets it for the next trial.

    `target` takes a lazily-connecting collection and an index from
    0 to NTHREADS, and performs some operation, e.g. an insert.

    `test` takes the lazily-connecting collection and asserts a
    post-condition to prove `target` succeeded.
    """
    collection = client_context.client.pymongo_test.test

    with frequent_thread_switches():
        for i in range(NTRIALS):
            reset(collection)
            # A fresh, not-yet-connected client for every trial.
            lazy_client = get_client()
            lazy_collection = lazy_client.pymongo_test.test
            run_threads(lazy_collection, target)
            test(lazy_collection)
def gevent_monkey_patched():
    """Return True if gevent's monkey patching is active."""
    with warnings.catch_warnings():
        # Importing gevent.socket raises an ImportWarning on Python 3.6.
        warnings.simplefilter("ignore", ImportWarning)
        try:
            import socket
            import gevent.socket
        except ImportError:
            return False
    # When patched, the stdlib socket class *is* gevent's.
    return socket.socket is gevent.socket.socket


def eventlet_monkey_patched():
    """Return True if eventlet's monkey patching is active."""
    try:
        import threading
        import eventlet
    except ImportError:
        return False
    # When patched, threading.current_thread comes from eventlet's
    # green module.
    return threading.current_thread.__module__ == 'eventlet.green.threading'


def is_greenthread_patched():
    """True when either gevent or eventlet has monkey patched the stdlib."""
    return gevent_monkey_patched() or eventlet_monkey_patched()
| 29.111314 | 80 | 0.655488 |
795831332447d09bbd722d4b622e2ed492219069 | 9,138 | py | Python | Config/config.py | DA-southampton/PyTorch_Bert_Text_Classification | 6ad4fc5f1ef502df762bb5cba0cdfc1cca7ec550 | [
"Apache-2.0"
] | 32 | 2019-03-16T08:50:01.000Z | 2022-03-10T01:47:45.000Z | Config/config.py | DA-southampton/PyTorch_Bert_Text_Classification | 6ad4fc5f1ef502df762bb5cba0cdfc1cca7ec550 | [
"Apache-2.0"
] | 5 | 2019-03-22T13:39:52.000Z | 2021-04-10T12:05:06.000Z | Config/config.py | DA-southampton/PyTorch_Bert_Text_Classification | 6ad4fc5f1ef502df762bb5cba0cdfc1cca7ec550 | [
"Apache-2.0"
] | 5 | 2019-05-08T04:56:43.000Z | 2020-07-29T07:19:22.000Z |
from configparser import ConfigParser
import os
class myconf(ConfigParser):
    """ConfigParser that preserves option-key case.

    Also carries ``add_sec``, the name of the section used for options
    added at runtime via Configurable.add_args.
    """
    def __init__(self, defaults=None):
        # Use super() for consistency with Configurable (the original
        # called ConfigParser.__init__ directly).
        super().__init__(defaults=defaults)
        self.add_sec = "Additional"

    def optionxform(self, optionstr):
        # ConfigParser lower-cases keys by default; return the key
        # unchanged so option names stay case-sensitive.
        return optionstr
class Configurable(myconf):
    """Typed accessor wrapper around an .ini configuration file.

    Reads *config_file*, echoes every option for inspection, ensures the
    save directory exists, writes the (possibly normalized) configuration
    back to disk, and exposes each option as a read-only property grouped
    by section ([Embed], [Data], [Bert], [BertModel], [Save], [Model],
    [Optimizer], [Train]).
    """
    def __init__(self, config_file):
        super().__init__()
        self.test = None
        self.train = None
        config = myconf()
        config.read(config_file)
        self._config = config
        self.config_file = config_file
        print('Loaded config file successfully.')
        for section in config.sections():
            for k, v in config.items(section):
                print(k, ":", v)
        if not os.path.isdir(self.save_direction):
            os.mkdir(self.save_direction)
        # Write back through a context manager; the previous
        # ``config.write(open(config_file, 'w'))`` leaked the file handle.
        with open(config_file, 'w') as config_fp:
            config.write(config_fp)

    def add_args(self, key, value):
        """Persist an extra option under the [Additional] section."""
        # The section is not guaranteed to exist in the file; create it
        # lazily so ConfigParser.set() does not raise NoSectionError.
        if not self._config.has_section(self.add_sec):
            self._config.add_section(self.add_sec)
        self._config.set(self.add_sec, key, value)
        with open(self.config_file, 'w') as config_fp:
            self._config.write(config_fp)

    # Embed
    @property
    def pretrained_embed(self):
        return self._config.getboolean('Embed', 'pretrained_embed')

    @property
    def zeros(self):
        return self._config.getboolean('Embed', 'zeros')

    @property
    def avg(self):
        return self._config.getboolean('Embed', 'avg')

    @property
    def uniform(self):
        return self._config.getboolean('Embed', 'uniform')

    @property
    def nnembed(self):
        return self._config.getboolean('Embed', 'nnembed')

    @property
    def pretrained_embed_file(self):
        return self._config.get('Embed', 'pretrained_embed_file')

    # Data
    @property
    def train_file(self):
        return self._config.get('Data', 'train_file')

    @property
    def dev_file(self):
        return self._config.get('Data', 'dev_file')

    @property
    def test_file(self):
        return self._config.get('Data', 'test_file')

    @property
    def max_count(self):
        return self._config.getint('Data', 'max_count')

    @property
    def min_freq(self):
        return self._config.getint('Data', 'min_freq')

    @property
    def shuffle(self):
        return self._config.getboolean('Data', 'shuffle')

    @property
    def epochs_shuffle(self):
        return self._config.getboolean('Data', 'epochs_shuffle')

    # Bert
    @property
    def use_bert(self):
        return self._config.getboolean('Bert', 'use_bert')

    @property
    def bert_dim(self):
        return self._config.getint('Bert', 'bert_dim')

    @property
    def bert_train_file(self):
        return self._config.get('Bert', 'bert_train_file')

    @property
    def bert_dev_file(self):
        return self._config.get('Bert', 'bert_dev_file')

    @property
    def bert_test_file(self):
        return self._config.get('Bert', 'bert_test_file')

    # BertModel
    @property
    def use_bert_model(self):
        return self._config.getboolean('BertModel', 'use_bert_model')

    @property
    def bert_model_path(self):
        return self._config.get('BertModel', 'bert_model_path')

    @property
    def bert_model_vocab(self):
        return self._config.get('BertModel', 'bert_model_vocab')

    @property
    def bert_max_char_length(self):
        return self._config.getint('BertModel', 'bert_max_char_length')

    @property
    def bert_model_max_seq_length(self):
        return self._config.getint('BertModel', 'bert_model_max_seq_length')

    @property
    def bert_model_batch_size(self):
        return self._config.getint('BertModel', 'bert_model_batch_size')

    @property
    def extract_dim(self):
        return self._config.getint('BertModel', 'extract_dim')

    @property
    def layers(self):
        return self._config.get('BertModel', 'layers')

    @property
    def local_rank(self):
        return self._config.getint('BertModel', 'local_rank')

    @property
    def no_cuda(self):
        return self._config.getboolean('BertModel', 'no_cuda')

    @property
    def do_lower_case(self):
        return self._config.getboolean('BertModel', 'do_lower_case')

    # Save
    @property
    def save_pkl(self):
        return self._config.getboolean('Save', 'save_pkl')

    @property
    def pkl_directory(self):
        return self._config.get('Save', 'pkl_directory')

    @property
    def pkl_data(self):
        return self._config.get('Save', 'pkl_data')

    @property
    def pkl_alphabet(self):
        return self._config.get('Save', 'pkl_alphabet')

    @property
    def pkl_iter(self):
        return self._config.get('Save', 'pkl_iter')

    @property
    def pkl_embed(self):
        return self._config.get('Save', 'pkl_embed')

    @property
    def save_dict(self):
        return self._config.getboolean('Save', 'save_dict')

    @property
    def save_direction(self):
        return self._config.get('Save', 'save_direction')

    @property
    def dict_directory(self):
        return self._config.get('Save', 'dict_directory')

    @property
    def word_dict(self):
        return self._config.get('Save', 'word_dict')

    @property
    def label_dict(self):
        return self._config.get('Save', 'label_dict')

    @property
    def model_name(self):
        return self._config.get('Save', 'model_name')

    @property
    def save_best_model_dir(self):
        return self._config.get('Save', 'save_best_model_dir')

    @property
    def save_model(self):
        return self._config.getboolean('Save', 'save_model')

    @property
    def save_all_model(self):
        return self._config.getboolean('Save', 'save_all_model')

    @property
    def save_best_model(self):
        return self._config.getboolean('Save', 'save_best_model')

    @property
    def rm_model(self):
        return self._config.getboolean('Save', 'rm_model')

    # Model
    @property
    def wide_conv(self):
        return self._config.getboolean("Model", "wide_conv")

    @property
    def model_cnn(self):
        return self._config.getboolean("Model", "model_cnn")

    @property
    def model_bilstm(self):
        return self._config.getboolean("Model", "model_bilstm")

    @property
    def lstm_layers(self):
        return self._config.getint("Model", "lstm_layers")

    @property
    def embed_dim(self):
        return self._config.getint("Model", "embed_dim")

    @property
    def embed_finetune(self):
        return self._config.getboolean("Model", "embed_finetune")

    @property
    def lstm_hiddens(self):
        return self._config.getint("Model", "lstm_hiddens")

    @property
    def dropout_emb(self):
        return self._config.getfloat("Model", "dropout_emb")

    @property
    def dropout(self):
        return self._config.getfloat("Model", "dropout")

    @property
    def conv_filter_sizes(self):
        return self._config.get("Model", "conv_filter_sizes")

    @property
    def conv_filter_nums(self):
        return self._config.getint("Model", "conv_filter_nums")

    # Optimizer
    @property
    def adam(self):
        return self._config.getboolean("Optimizer", "adam")

    @property
    def sgd(self):
        return self._config.getboolean("Optimizer", "sgd")

    @property
    def learning_rate(self):
        return self._config.getfloat("Optimizer", "learning_rate")

    @property
    def weight_decay(self):
        return self._config.getfloat("Optimizer", "weight_decay")

    @property
    def momentum(self):
        return self._config.getfloat("Optimizer", "momentum")

    @property
    def clip_max_norm_use(self):
        return self._config.getboolean("Optimizer", "clip_max_norm_use")

    @property
    def clip_max_norm(self):
        return self._config.get("Optimizer", "clip_max_norm")

    @property
    def use_lr_decay(self):
        return self._config.getboolean("Optimizer", "use_lr_decay")

    @property
    def lr_rate_decay(self):
        return self._config.getfloat("Optimizer", "lr_rate_decay")

    @property
    def min_lrate(self):
        return self._config.getfloat("Optimizer", "min_lrate")

    @property
    def max_patience(self):
        return self._config.getint("Optimizer", "max_patience")

    # Train
    @property
    def num_threads(self):
        return self._config.getint("Train", "num_threads")

    @property
    def epochs(self):
        return self._config.getint("Train", "epochs")

    @property
    def early_max_patience(self):
        return self._config.getint("Train", "early_max_patience")

    @property
    def backward_batch_size(self):
        return self._config.getint("Train", "backward_batch_size")

    @property
    def batch_size(self):
        return self._config.getint("Train", "batch_size")

    @property
    def dev_batch_size(self):
        return self._config.getint("Train", "dev_batch_size")

    @property
    def test_batch_size(self):
        return self._config.getint("Train", "test_batch_size")

    @property
    def log_interval(self):
        return self._config.getint("Train", "log_interval")
| 25.454039 | 76 | 0.644342 |
79583220641f8aa8380091ca42c4d9323f49dd31 | 580 | py | Python | chapter7/ftplib/ftp_download_file_bytes.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | null | null | null | chapter7/ftplib/ftp_download_file_bytes.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | null | null | null | chapter7/ftplib/ftp_download_file_bytes.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from ftplib import FTP
ftp_client=FTP('ftp.be.debian.org')
ftp_client.login()
ftp_client.cwd('/pub/linux/kernel/v5.x/')
ftp_client.voidcmd("TYPE I")
datasock,estsize=ftp_client.ntransfercmd("RETR ChangeLog-5.0")
transbytes=0
file_descryptor=open('ChangeLog-5.0','wb')
while True:
buffer=datasock.recv(2048)
if not len(buffer):
break
file_descryptor.write(buffer)
transbytes +=len(buffer)
print("Bytes received",transbytes,"Total",(estsize,100.0*float(transbytes)/float(estsize)),str('%'))
file_descryptor.close()
datasock.close()
ftp_client.quit()
| 26.363636 | 101 | 0.758621 |
795833530641f0c831bfc851360f6fd6381591ed | 874 | py | Python | Demo/tkinter/matt/window-creation-w-location.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/tkinter/matt/window-creation-w-location.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/tkinter/matt/window-creation-w-location.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | from Tkinter import *
import sys
sys.path.append("/users/mjc4y/projects/python/tkinter/utils")
from TkinterUtils import *
# this shows how to create a new window with a button in it that
# can create new windows
class Test(Frame):
    """Demo frame with a button that spawns centered toplevel windows."""
    def makeWindow(self, *args):
        # Create a new toplevel holding a 2x2-inch canvas with an X drawn
        # across it, then center it over the main window.
        fred = Toplevel()
        fred.label = Canvas (fred, width="2i", height="2i")
        fred.label.create_line("0", "0", "2i", "2i")
        fred.label.create_line("0", "2i", "2i", "0")
        fred.label.pack()
        centerWindow(fred, self.master)
    def createWidgets(self):
        # QuitButton comes from the local TkinterUtils helper module.
        self.QUIT = QuitButton(self)
        self.QUIT.pack(side=LEFT, fill=BOTH)
        # NOTE(review): this assignment rebinds self.makeWindow from the
        # method to the Button widget. The bound method was captured in
        # ``command=`` first, so the callback still works, but the method
        # is shadowed on the instance afterwards — confirm intentional.
        self.makeWindow = Button(self, text='Make a New Window',
                                 width=50, height=20,
                                 command=self.makeWindow)
        self.makeWindow.pack(side=LEFT)
    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
# Instantiate the demo frame and hand control to the Tk event loop.
test = Test()
test.mainloop()
| 22.410256 | 64 | 0.697941 |
7958344e463ae2e738aac29dc43efc9aea25ded9 | 4,207 | py | Python | src/utils.py | Oogway-Technologies/search_app | 401901ab65ecff9500c8edff68234645daeca4cf | [
"Apache-2.0"
] | null | null | null | src/utils.py | Oogway-Technologies/search_app | 401901ab65ecff9500c8edff68234645daeca4cf | [
"Apache-2.0"
] | null | null | null | src/utils.py | Oogway-Technologies/search_app | 401901ab65ecff9500c8edff68234645daeca4cf | [
"Apache-2.0"
] | null | null | null | import json
import requests
from src.const import (QueryType, TDS_QA_ENDPOINT, WIKIFIER_ENDPOINT, WIKIFIER_THRESHOLD)
def get_query_type(query: str) -> QueryType:
    """Classify a raw query string by its command prefix.

    Falls back to SEARCH_QUERY when no known prefix matches; an empty
    query maps to EMPTY_QUERY.
    """
    if not query:
        return QueryType.EMPTY_QUERY
    # Longer 'res-*' prefixes are listed before 'res:' so each query maps
    # to exactly one type regardless of iteration order.
    prefix_to_type = (
        ('explore:', QueryType.EXPLORE_QUERY),
        ('open:', QueryType.OPEN_QUERY),
        ('res-explore:', QueryType.RES_EXPLORE_QUERY),
        ('res-open:', QueryType.RES_OPEN_QUERY),
        ('res:', QueryType.RES_SEARCH_QUERY),
    )
    for prefix, query_type in prefix_to_type:
        if query.startswith(prefix):
            return query_type
    return QueryType.SEARCH_QUERY
def is_bonus_query(query: str) -> bool:
    """Return True when *query* uses the restaurant ('res:') prefix."""
    return query[:4] == 'res:'
def is_tds_qa(query: str) -> bool:
    """Heuristically detect a question: the query ends with '?'.

    Unlike the previous ``query[-1] == '?'``, this does not raise
    IndexError on an empty query — it simply returns False.
    """
    return bool(query) and query.endswith('?')
def call_search_endpoint(endpoint: str, search_query: str, num_results: int) -> dict:
    """POST *search_query* to *endpoint* and return the parsed JSON reply.

    Returns an empty dict on any non-200 status code.
    """
    body = json.dumps({
        "query": search_query,
        "num_results": num_results,
    })
    response = requests.post(
        endpoint,
        headers={'Content-Type': 'application/json'},
        data=body,
    )
    if response.status_code != 200:
        return {}
    return json.loads(response.text)
def call_qa_endpoint(search_query: str, num_results: int, num_reader: int):
    """POST a question-answering request to the TDS QA endpoint.

    Returns the parsed JSON reply, or an empty dict on a non-200 response.
    """
    body = json.dumps({
        "query": search_query,
        "num_results": num_results,
        "num_reader": num_reader,
    })
    response = requests.post(
        TDS_QA_ENDPOINT,
        headers={'Content-Type': 'application/json'},
        data=body,
    )
    if response.status_code != 200:
        return {}
    return json.loads(response.text)
def select_root_and_get_cards_list(query: str, root_card_list: list):
    """Find the root card whose type is named after the ':' in *query*.

    Matching happens in three passes of decreasing strictness: exact
    (case-insensitive), space-insensitive, then prefix match. Returns the
    matched card's children, or None when nothing matches or the query
    carries no card type.
    """
    pieces = query.split(':')
    if len(pieces) == 1 or not pieces[1]:
        return None
    requested = ' '.join(piece.strip() for piece in pieces[1:])
    wanted = requested.strip().lower()

    # Pass 1: exact case-insensitive match.
    for root_card in root_card_list:
        if wanted == root_card.card_type.strip().lower():
            return root_card.get_children()

    # Pass 2: compare with all spaces removed on both sides.
    wanted_no_space = wanted.replace(' ', '')
    for root_card in root_card_list:
        if wanted_no_space == root_card.card_type.strip().lower().replace(' ', ''):
            return root_card.get_children()

    # Pass 3: treat the request as a prefix of the card type.
    for root_card in root_card_list:
        if root_card.card_type.strip().lower().startswith(wanted):
            return root_card.get_children()

    return None
def select_card_to_open(query: str, card_list: list):
    """Open and return the card at the index named after the ':' in *query*.

    Returns None for a missing/empty index, a non-numeric index, or an
    index outside ``card_list``.
    """
    pieces = query.split(':')
    if len(pieces) == 1 or not pieces[1]:
        return None
    try:
        card_idx = int(pieces[1].strip())
    except ValueError:
        # The previous bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit; int() on a str can only raise ValueError here.
        return None
    if card_idx < 0 or card_idx >= len(card_list):
        return None
    # Get the card and open it before handing it back.
    card = card_list[card_idx]
    card.open_card()
    return card
def run_wikifier(text: str):
    """Send *text* to the wikifier endpoint and return its parsed JSON reply.

    Newlines and double spaces are flattened before sending. Returns an
    empty dict on network errors or any non-200 response.
    """
    text = text.replace('\n', ' ')
    text = text.replace('  ', ' ')
    url = WIKIFIER_ENDPOINT
    payload = json.dumps({
        "text": text,
        "threshold": WIKIFIER_THRESHOLD,
        "coref": True
    })
    headers = {
        'Content-Type': 'application/json'
    }
    try:
        response = requests.request("POST", url, headers=headers, data=payload)
    except requests.exceptions.RequestException:
        # The previous bare ``except:`` also swallowed KeyboardInterrupt /
        # SystemExit; only network-level failures should degrade to {}.
        return {}
    if response.status_code != 200:
        return {}
    return json.loads(response.text)
def call_restaurant_endpoint(endpoint: str, search_query: str, num_results: int, location_list: list) -> dict:
    """POST a restaurant search (query + locations) to *endpoint*.

    Returns the parsed JSON reply, or an empty dict on a non-200 response.
    """
    body = json.dumps({
        "query": search_query,
        "location_list": location_list,
        "num_results": num_results,
    })
    response = requests.post(
        endpoint,
        headers={'Content-Type': 'application/json'},
        data=body,
    )
    if response.status_code != 200:
        return {}
    return json.loads(response.text)
| 27.496732 | 110 | 0.644878 |
79583490641d50411e6b54dab28cc1ba21507972 | 391 | py | Python | cms/forms/validators.py | petrklus/django-cms | 5094a01afd82e59496e2b09f06e63c4c3147e342 | [
"BSD-3-Clause"
] | 1 | 2018-05-18T06:00:19.000Z | 2018-05-18T06:00:19.000Z | cms/forms/validators.py | petrklus/django-cms | 5094a01afd82e59496e2b09f06e63c4c3147e342 | [
"BSD-3-Clause"
] | null | null | null | cms/forms/validators.py | petrklus/django-cms | 5094a01afd82e59496e2b09f06e63c4c3147e342 | [
"BSD-3-Clause"
] | 1 | 2017-10-17T08:20:32.000Z | 2017-10-17T08:20:32.000Z | from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator, URLValidator
from cms.utils.urlutils import relative_url_regex
def validate_url(value):
    """Validate *value* as a relative URL first, then as an absolute one."""
    relative_validator = RegexValidator(regex=relative_url_regex)
    try:
        relative_validator(value)
    except ValidationError:
        # Not a relative URL - fall back to absolute-URL validation and let
        # URLValidator raise its own ValidationError on failure.
        URLValidator()(value)
| 27.928571 | 63 | 0.754476 |
795834e73c1e8fc6306ec1ad60a4f05eca1cc1c0 | 4,607 | py | Python | odoo-13.0/addons/project/tests/test_project_base.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/project/tests/test_project_base.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/project/tests/test_project_base.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo.tests.common import SavepointCase
from odoo.exceptions import UserError
class TestProjectBase(SavepointCase):
    """Shared fixture base for project tests.

    ``setUpClass`` creates two partners, four users covering every access
    level (public, portal, project user, project manager) and two projects:
    'Pigs' (employee-visible, with two pre-made tasks) and 'Goats'
    (follower-visible, with a custom two-stage pipeline).
    """

    @classmethod
    def setUpClass(cls):
        super(TestProjectBase, cls).setUpClass()
        user_group_employee = cls.env.ref('base.group_user')
        user_group_project_user = cls.env.ref('project.group_project_user')
        user_group_project_manager = cls.env.ref('project.group_project_manager')
        # Partners used as project customers / mail correspondents.
        cls.partner_1 = cls.env['res.partner'].create({
            'name': 'Valid Lelitre',
            'email': 'valid.lelitre@agrolait.com'})
        cls.partner_2 = cls.env['res.partner'].create({
            'name': 'Valid Poilvache',
            'email': 'valid.other@gmail.com'})
        # Test users to use through the various tests
        # (no_reset_password avoids sending signup mails during setup)
        Users = cls.env['res.users'].with_context({'no_reset_password': True})
        cls.user_public = Users.create({
            'name': 'Bert Tartignole',
            'login': 'bert',
            'email': 'b.t@example.com',
            'signature': 'SignBert',
            'notification_type': 'email',
            'groups_id': [(6, 0, [cls.env.ref('base.group_public').id])]})
        cls.user_portal = Users.create({
            'name': 'Chell Gladys',
            'login': 'chell',
            'email': 'chell@gladys.portal',
            'signature': 'SignChell',
            'notification_type': 'email',
            'groups_id': [(6, 0, [cls.env.ref('base.group_portal').id])]})
        cls.user_projectuser = Users.create({
            'name': 'Armande ProjectUser',
            'login': 'Armande',
            'email': 'armande.projectuser@example.com',
            'groups_id': [(6, 0, [user_group_employee.id, user_group_project_user.id])]
        })
        cls.user_projectmanager = Users.create({
            'name': 'Bastien ProjectManager',
            'login': 'bastien',
            'email': 'bastien.projectmanager@example.com',
            'groups_id': [(6, 0, [user_group_employee.id, user_group_project_manager.id])]})
        # Test 'Pigs' project
        cls.project_pigs = cls.env['project.project'].with_context({'mail_create_nolog': True}).create({
            'name': 'Pigs',
            'privacy_visibility': 'employees',
            'alias_name': 'project+pigs',
            'partner_id': cls.partner_1.id})
        # Already-existing tasks in Pigs
        cls.task_1 = cls.env['project.task'].with_context({'mail_create_nolog': True}).create({
            'name': 'Pigs UserTask',
            'user_id': cls.user_projectuser.id,
            'project_id': cls.project_pigs.id})
        cls.task_2 = cls.env['project.task'].with_context({'mail_create_nolog': True}).create({
            'name': 'Pigs ManagerTask',
            'user_id': cls.user_projectmanager.id,
            'project_id': cls.project_pigs.id})
        # Test 'Goats' project, same as 'Pigs', but with 2 stages
        cls.project_goats = cls.env['project.project'].with_context({'mail_create_nolog': True}).create({
            'name': 'Goats',
            'privacy_visibility': 'followers',
            'alias_name': 'project+goats',
            'partner_id': cls.partner_1.id,
            'type_ids': [
                (0, 0, {
                    'name': 'New',
                    'sequence': 1,
                }),
                (0, 0, {
                    'name': 'Won',
                    'sequence': 10,
                })]
        })

    def format_and_process(self, template, to='groups@example.com, other@gmail.com', subject='Frogs',
                           extra='', email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>',
                           cc='', msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>',
                           model=None, target_model='project.task', target_field='name'):
        """Feed *template* (an email) through the mail gateway and return
        the record(s) it created.

        The subject must not match any existing *target_model* record
        beforehand; afterwards records are looked up by
        *target_field* == *subject*.
        """
        self.assertFalse(self.env[target_model].search([(target_field, '=', subject)]))
        mail = template.format(to=to, subject=subject, cc=cc, extra=extra, email_from=email_from, msg_id=msg_id)
        self.env['mail.thread'].with_context(mail_channel_noautofollow=True).message_process(model, mail)
        return self.env[target_model].search([(target_field, '=', subject)])

    def test_delete_project_with_tasks(self):
        """User should never be able to delete a project with tasks"""
        with self.assertRaises(UserError):
            self.project_pigs.unlink()
        # click on the archive button
        self.project_pigs.write({'active': False})
        # Archiving must not make deletion possible either.
        with self.assertRaises(UserError):
            self.project_pigs.unlink()
| 44.298077 | 112 | 0.576948 |
795836212919b22ecc93c8fcf44de03366dfab7c | 7,736 | py | Python | freshdesk/v2/tests/conftest.py | grumo35/python-freshdesk | 6025c0338e39157c8c1c1aa0c3c6c1fd4d14c490 | [
"BSD-2-Clause"
] | null | null | null | freshdesk/v2/tests/conftest.py | grumo35/python-freshdesk | 6025c0338e39157c8c1c1aa0c3c6c1fd4d14c490 | [
"BSD-2-Clause"
] | null | null | null | freshdesk/v2/tests/conftest.py | grumo35/python-freshdesk | 6025c0338e39157c8c1c1aa0c3c6c1fd4d14c490 | [
"BSD-2-Clause"
] | null | null | null | import json
import os.path
import re
import pytest
from freshdesk.v2.api import API
DOMAIN = "pythonfreshdesk.freshdesk.com"
API_KEY = "MX4CEAw4FogInimEdRW2"
class MockedAPI(API):
    """API stub that answers from canned JSON fixtures instead of HTTP.

    Each HTTP verb has a table mapping URL regexes to pre-loaded fixture
    data; the ``_get``/``_post``/``_put``/``_delete`` overrides look the
    requested URL up in that table and raise a 404-style HTTPError when no
    pattern matches.
    """

    def __init__(self, *args):
        self.resolver = {
            "get": {
                re.compile(r"tickets\?filter=new_and_my_open&page=1&per_page=100"): self.read_test_file(
                    "all_tickets.json"
                ),
                re.compile(r"tickets\?filter=deleted&page=1&per_page=100"): self.read_test_file("all_tickets.json"),
                re.compile(r"tickets\?filter=spam&page=1&per_page=100"): self.read_test_file("all_tickets.json"),
                re.compile(r"tickets\?filter=watching&page=1&per_page=100"): self.read_test_file("all_tickets.json"),
                re.compile(r"tickets\?filter=new_and_my_open&updated_since=2014-01-01&page=1&per_page=100"): self.read_test_file("all_tickets.json"),
                re.compile(r"tickets\?page=1&per_page=100"): self.read_test_file("all_tickets.json"),
                re.compile(r"tickets/1$"): self.read_test_file("ticket_1.json"),
                re.compile(r"tickets/1?include=stats,requester$"): self.read_test_file("ticket_1.json"),
                re.compile(r"tickets/1/conversations"): self.read_test_file("conversations.json"),
                re.compile(r"tickets/1/time_entries"): self.read_test_file("time_entry_1.json"),
                re.compile(r"time_entries"): self.read_test_file("time_entries.json"),
                re.compile(r"companies\?page=1&per_page=100$"): self.read_test_file("companies.json"),
                re.compile(r"companies/1$"): self.read_test_file("company.json"),
                re.compile(r"contacts\?page=1&per_page=100$"): self.read_test_file("contacts.json"),
                re.compile(r"contacts/1$"): self.read_test_file("contact.json"),
                re.compile(r"customers/1$"): self.read_test_file("customer.json"),
                re.compile(r"groups\?page=1&per_page=100$"): self.read_test_file("groups.json"),
                re.compile(r"groups/1$"): self.read_test_file("group_1.json"),
                re.compile(r"roles$"): self.read_test_file("roles.json"),
                re.compile(r"roles/1$"): self.read_test_file("role_1.json"),
                re.compile(r"agents\?email=abc@xyz.com&page=1&per_page=100"): self.read_test_file("agent_1.json"),
                re.compile(r"agents\?mobile=1234&page=1&per_page=100"): self.read_test_file("agent_1.json"),
                re.compile(r"agents\?phone=5678&page=1&per_page=100"): self.read_test_file("agent_1.json"),
                re.compile(r"agents\?state=fulltime&page=1&per_page=100"): self.read_test_file("agent_1.json"),
                re.compile(r"agents\?page=1&per_page=100"): self.read_test_file("agents.json"),
                re.compile(r"agents/1$"): self.read_test_file("agent_1.json"),
                re.compile(r'search/companies\?page=1&query="updated_at:>\'2020-07-12\'"'): self.read_test_file("search_companies.json"),
                re.compile(r'search/tickets\?page=1&query="tag:\'mytag\'"'): self.read_test_file("search_tickets.json"),
                re.compile(r'solutions/categories$'): self.read_test_file("solution_categories.json"),
                re.compile(r'solutions/categories/2$'): self.read_first_from_test_file("solution_categories.json"),
                re.compile(r'solutions/categories/2/fr$'): self.read_first_from_test_file("solution_categories_fr.json"),
                re.compile(r'solutions/categories/2/folders$'): self.read_test_file("solution_folders.json"),
                re.compile(r'solutions/categories/2/folders/fr$'): self.read_test_file("solution_folders_fr.json"),
                re.compile(r'solutions/folders/3$'): self.read_first_from_test_file("solution_folders.json"),
                re.compile(r'solutions/folders/3/fr$'): self.read_first_from_test_file("solution_folders_fr.json"),
                re.compile(r'solutions/folders/3/articles$'): self.read_test_file("solution_articles.json"),
                re.compile(r'solutions/folders/3/articles/fr$'): self.read_test_file("solution_articles_fr.json"),
                re.compile(r'solutions/articles/4$'): self.read_first_from_test_file("solution_articles.json"),
                re.compile(r'solutions/articles/4/fr$'): self.read_first_from_test_file("solution_articles_fr.json"),
            },
            "post": {
                re.compile(r"tickets$"): self.read_test_file("ticket_1.json"),
                re.compile(r"tickets/outbound_email$"): self.read_test_file("outbound_email_1.json"),
                re.compile(r"tickets/1/notes$"): self.read_test_file("note_1.json"),
                re.compile(r"tickets/1/reply$"): self.read_test_file("reply_1.json"),
                re.compile(r"contacts$"): self.read_test_file("contact.json"),
                re.compile(r"companies$"): self.read_test_file("company.json"),
            },
            "put": {
                re.compile(r"tickets/1$"): self.read_test_file("ticket_1_updated.json"),
                re.compile(r"contacts/1$"): self.read_test_file("contact_updated.json"),
                re.compile(r"contacts/1/restore$"): self.read_test_file("contact.json"),
                re.compile(r"contacts/1/make_agent$"): self.read_test_file("contact_1_agent.json"),
                re.compile(r"agents/1$"): self.read_test_file("agent_1_updated.json"),
                re.compile(r"companies/1$"): self.read_test_file("company_updated.json"),
            },
            "delete": {
                re.compile(r"tickets/1$"): None,
                re.compile(r"agents/1$"): None,
                re.compile(r"contacts/1$"): None,
                re.compile(r"contacts/1/hard_delete\?force=True$"): None,
                re.compile(r"companies/1$"): None
            },
        }

        super(MockedAPI, self).__init__(*args)

    def read_test_file(self, filename):
        """Load and parse a fixture file from ``sample_json_data/``."""
        path = os.path.join(os.path.dirname(__file__), "sample_json_data", filename)
        # Context manager closes the handle (the previous
        # ``open(path, "r").read()`` leaked it).
        with open(path, "r") as fixture:
            return json.load(fixture)

    def read_first_from_test_file(self, filename):
        """Load a fixture containing a JSON array and return its first item."""
        return self.read_test_file(filename)[0]

    def _resolve(self, method, mocked_name, url):
        """Return the fixture for the first pattern matching *url*, else 404."""
        for pattern, data in self.resolver[method].items():
            if pattern.match(url):
                return data

        # No match found, raise 404
        from requests.exceptions import HTTPError

        raise HTTPError("404: {}() has no pattern for '{}'".format(mocked_name, url))

    def _get(self, url, *args, **kwargs):
        return self._resolve("get", "mocked_api_get", url)

    def _post(self, url, *args, **kwargs):
        return self._resolve("post", "mocked_api_post", url)

    def _put(self, url, *args, **kwargs):
        return self._resolve("put", "mocked_api_put", url)

    def _delete(self, url, *args, **kwargs):
        return self._resolve("delete", "mocked_api_delete", url)
@pytest.fixture()
def api():
    """Provide a MockedAPI bound to the canned test domain and API key."""
    return MockedAPI(DOMAIN, API_KEY)
| 55.654676 | 149 | 0.623837 |
7958363ce24f98547a6a9b12cdf2c9918a8c375c | 2,654 | py | Python | ruby_token_builders.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | ruby_token_builders.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | ruby_token_builders.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | from codestat_token import Token
from token_builders import TokenBuilder
# token reader for Ruby identifiers (locals, @ivars, and foo? / foo! forms)
class RubyIdentifierTokenBuilder(TokenBuilder):
  @staticmethod
  def __escape_z__():
    Token.__escape_z__()
    return 'Escape ?Z'

  def __init__(self):
    self.text = None

  def get_tokens(self):
    """Emit a single 'identifier' token for the accumulated text."""
    if self.text is None:
      return None
    return [Token(self.text, 'identifier', True)]

  def accept(self, candidate, c):
    """Decide whether character *c* may extend the partial identifier."""
    if len(candidate) == 0:
      # leading position: a letter, underscore, or '@' sigil
      return c.isalpha() or c == '_' or c == '@'
    if len(candidate) == 1:
      if candidate == '@':
        # right after the sigil: same rules as the leading position
        # ('@@' makes a class variable)
        return c.isalpha() or c == '_' or c == '@'
      # second character of a plain identifier; '?'/'!' may terminate it
      return c.isalpha() or c.isdigit() or c in ('_', '?', '!')
    if candidate[-1] in ('?', '!'):
      # '?' and '!' end an identifier; nothing may follow them
      return False
    return c.isalpha() or c.isdigit() or c in ('_', '?', '!')
# token reader for Ruby heredoc literals (e.g. <<MARKER ... MARKER)
class HereDocTokenBuilder(TokenBuilder):
  @staticmethod
  def __escape_z__():
    Token.__escape_z__()
    return 'Escape ?Z'
  def __init__(self, operator):
    # operator is the heredoc introducer (e.g. '<<-'); text accumulates
    # the candidate characters accepted so far
    self.text = None
    self.operator = operator
  def get_tokens(self):
    """Split the accumulated heredoc into operator/marker/content tokens."""
    if self.text is None:
      return None
    # split the text into operator, marker, content, and marker tokens
    lines = self.text.split('\n')
    # first 3 chars of the first line are the heredoc operator
    oper = lines[0][:3]
    marker = lines[-1]
    content = Token('\n'.join(lines[1:-1]), 'here doc', False)
    op_token = Token(oper, 'operator', False)
    mark_token = Token(marker, 'doc marker', True)
    newline_token = Token('\n', 'newline', False)
    # the marker token is used twice - once at beginning and once at end
    return [
      op_token,
      mark_token,
      newline_token,
      content,
      newline_token,
      mark_token
    ]
  def accept(self, candidate, c):
    """Accept characters until the closing marker line has been seen."""
    if len(candidate) < len(self.operator):
      # still matching the introducer itself, prefix by prefix
      return self.operator.startswith(candidate)
    result = False
    if candidate.startswith(self.operator):
      result = True
    # if the last line begins with the marker from the first line
    # stop accepting characters
    lines = candidate.split('\n')
    if len(lines) > 1:
      first_line = lines[0]
      last_line = lines[-1]
      # the marker is whatever followed the operator on the opening line
      marker = first_line[len(self.operator):].rstrip()
      if last_line.startswith(marker):
        result = False
    return result
  def get_score(self, line_printable_tokens):
    """Score the candidate: its length if it is a well-formed heredoc, else 0."""
    if self.text is None:
      return 0
    lines = self.text.split('\n')
    if len(lines) < 2:
      return 0
    line0 = lines[0].rstrip()
    # operator (3 chars) plus at least a 1-char marker
    if len(line0) < 4:
      return 0
    marker = lines[0][3:].rstrip()
    last_line = lines[-1].rstrip()
    # a valid heredoc must terminate with its own marker on the last line
    if last_line != marker:
      return 0
    return len(self.text)
| 23.078261 | 72 | 0.611153 |
795837183bbc024b49b78870d4a898388bcc37ef | 1,933 | py | Python | examples/postman_echo/request_methods/demo_testsuite_yml/request_with_testcase_reference_test.py | BSTester/httprunner | 909e864beb459eb3504a5d9febe148e65c9f146f | [
"Apache-2.0"
] | null | null | null | examples/postman_echo/request_methods/demo_testsuite_yml/request_with_testcase_reference_test.py | BSTester/httprunner | 909e864beb459eb3504a5d9febe148e65c9f146f | [
"Apache-2.0"
] | null | null | null | examples/postman_echo/request_methods/demo_testsuite_yml/request_with_testcase_reference_test.py | BSTester/httprunner | 909e864beb459eb3504a5d9febe148e65c9f146f | [
"Apache-2.0"
] | null | null | null | # NOTE: Generated By HttpRunner v3.1.4
# FROM: request_methods/request_with_testcase_reference.yml
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from airhttprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
from request_methods.request_with_functions_test import (
TestCaseRequestWithFunctions as RequestWithFunctions,
)
class TestCaseRequestWithTestcaseReference(HttpRunner):
    """Generated HttpRunner testcase (do not hand-tune lightly): runs a
    referenced testcase, exports ``foo3`` from it, then posts form data
    mixing local and exported variables against postman-echo.
    """
    # Suite-level settings: shared variables, base URL, TLS verification
    # and locust load weight.
    config = (
        Config("request with referenced testcase")
        .variables(
            **{
                "foo1": "testcase_ref_bar12",
                "expect_foo1": "testcase_ref_bar12",
                "expect_foo2": "testcase_ref_bar22",
                "foo2": "testcase_ref_bar22",
            }
        )
        .base_url("https://postman-echo.com")
        .verify(False)
        .locust_weight(3)
    )
    teststeps = [
        # Step 1: call the referenced testcase with overridden variables,
        # wrapped by setup/teardown sleeps, and export foo3 for later steps.
        Step(
            RunTestCase("request with functions")
            .with_variables(
                **{"foo1": "testcase_ref_bar1", "expect_foo1": "testcase_ref_bar1"}
            )
            .setup_hook("${sleep(0.1)}")
            .call(RequestWithFunctions)
            .teardown_hook("${sleep(0.2)}")
            .export(*["foo3"])
        ),
        # Step 2: POST urlencoded form data; foo2 comes from the exported
        # foo3 of step 1.
        Step(
            RunRequest("post form data")
            .with_variables(**{"foo1": "bar1"})
            .post("/post")
            .with_headers(
                **{
                    "User-Agent": "HttpRunner/${get_httprunner_version()}",
                    "Content-Type": "application/x-www-form-urlencoded",
                }
            )
            .with_data("foo1=$foo1&foo2=$foo3")
            .validate()
            .assert_equal("status_code", 200)
            .assert_equal("body.form.foo1", "bar1")
            .assert_equal("body.form.foo2", "bar21")
        ),
    ]
if __name__ == "__main__":
    # Allow running this generated testcase directly from the command line.
    TestCaseRequestWithTestcaseReference().test_start()
| 28.850746 | 83 | 0.559234 |
7958372da7eb1f2e65bafe28933a89d250958fdc | 6,917 | py | Python | ECE_Night/views.py | biomotion/ECE_Night_2019 | 83e49150848638b9d943f92e0ee87ae0e89c543e | [
"Apache-2.0"
] | null | null | null | ECE_Night/views.py | biomotion/ECE_Night_2019 | 83e49150848638b9d943f92e0ee87ae0e89c543e | [
"Apache-2.0"
] | 5 | 2020-02-12T03:23:38.000Z | 2021-06-10T22:25:14.000Z | ECE_Night/views.py | biomotion/ECE_Night_2019 | 83e49150848638b9d943f92e0ee87ae0e89c543e | [
"Apache-2.0"
] | null | null | null | from rest_framework import viewsets
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from .serializers import ProfileSerializer, UserSerializer, SkillSerializer, SessionSerializer, ProgramSerializer
from django.contrib.auth.models import User
from main.models import Profile, Skill, Session, Program
from rest_framework import serializers
from .permission import IsOwner, IsUser, IsSuperUser, IsBelongToOwner, IsPostAndAuthenticated
class ProfileViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows profiles to be viewed or edited.

    Permissions depend on the action: listing is superuser-only, while
    retrieving/updating is restricted to the owning user.
    """
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer

    def update(self, request, pk=None):
        """Fully update a profile; 202 on success, 400 on validation errors.

        Uses ``self.get_object()`` instead of ``Profile.objects.get`` so an
        unknown pk yields a proper 404 (not an unhandled DoesNotExist/500)
        and object-level permissions are enforced.
        """
        profile = self.get_object()
        serializer = ProfileSerializer(profile, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def get_permissions(self):
        """Pick the permission classes based on the current action."""
        if self.action == 'list':
            self.permission_classes = [IsSuperUser, ]
        elif self.action == 'update':
            self.permission_classes = [IsBelongToOwner]
        elif self.action == 'retrieve':
            self.permission_classes = [IsOwner]
        # Zero-argument super(): ``super(self.__class__, self)`` recurses
        # infinitely as soon as this class is subclassed.
        return super().get_permissions()
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows Django auth users to be viewed or edited.

    Only superusers may list all users; a user may update or retrieve
    only their own record.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def get_permissions(self):
        """Choose permission classes per action, then defer to DRF."""
        if self.action == 'list':
            self.permission_classes = [IsSuperUser, ]
        elif self.action == 'update':
            self.permission_classes = [IsBelongToOwner]
        elif self.action == 'retrieve':
            self.permission_classes = [IsUser]
        # Fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; the zero-argument form resolves correctly.
        return super().get_permissions()
class SkillViewSet(viewsets.ModelViewSet):
    """
    API endpoint for CRUD operations on Skill objects.

    Listing is restricted to superusers; retrieve/update/destroy require
    ownership; create only requires an authenticated POST.
    """
    queryset = Skill.objects.all()
    serializer_class = SkillSerializer

    def create(self, request):
        """Create a new skill; the request is passed so the serializer
        can attribute ownership."""
        serializer = SkillSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, pk=None):
        """Delete one skill, returning 404 if it does not exist."""
        try:
            skill = Skill.objects.get(pk=pk)
        except Skill.DoesNotExist:
            # Previously an unguarded .get() turned a missing pk into a 500.
            return Response(status=status.HTTP_404_NOT_FOUND)
        skill.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def update(self, request, pk=None):
        """Validate and persist a full update of one skill."""
        try:
            skill = Skill.objects.get(pk=pk)
        except Skill.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = SkillSerializer(skill, data=request.data, context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def get_permissions(self):
        """Choose permission classes per action, then defer to DRF."""
        if self.action == 'list':
            self.permission_classes = [IsSuperUser, ]
        elif self.action in ('retrieve', 'destroy', 'update'):
            self.permission_classes = [IsBelongToOwner]
        elif self.action == 'create':
            self.permission_classes = [IsPostAndAuthenticated, ]
        # Fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; the zero-argument form resolves correctly.
        return super().get_permissions()
class SessionViewSet(viewsets.ModelViewSet):
    """
    API endpoint for CRUD operations on Session objects.

    Mirrors SkillViewSet: listing is restricted to superusers;
    retrieve/update/destroy require ownership; create only requires an
    authenticated POST.
    """
    queryset = Session.objects.all()
    serializer_class = SessionSerializer

    def create(self, request):
        """Create a new session; the request is passed so the serializer
        can attribute ownership."""
        serializer = SessionSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, pk=None):
        """Delete one session, returning 404 if it does not exist."""
        try:
            session = Session.objects.get(pk=pk)
        except Session.DoesNotExist:
            # Previously an unguarded .get() turned a missing pk into a 500.
            return Response(status=status.HTTP_404_NOT_FOUND)
        session.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def update(self, request, pk=None):
        """Validate and persist a full update of one session."""
        try:
            # Renamed the misleading local (was "skill") to match the model.
            session = Session.objects.get(pk=pk)
        except Session.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = SessionSerializer(session, data=request.data, context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def get_permissions(self):
        """Choose permission classes per action, then defer to DRF."""
        if self.action == 'list':
            self.permission_classes = [IsSuperUser, ]
        elif self.action in ('retrieve', 'destroy', 'update'):
            self.permission_classes = [IsBelongToOwner]
        elif self.action == 'create':
            self.permission_classes = [IsPostAndAuthenticated, ]
        # Fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; the zero-argument form resolves correctly.
        return super().get_permissions()
@api_view(['GET', 'POST'])
def profile_skill_list(request, pk):
    """
    List all skills belonging to profile *pk* (GET), or create a new
    skill (POST).

    DELETE was previously declared but never handled, so such requests
    fell off the end of the view and produced a server error; it now
    correctly gets a 405 from DRF.
    """
    # QuerySet.filter() never raises DoesNotExist, so the old try/except
    # (and the unused module-level queryset) were dead code.
    skills = Skill.objects.filter(owner=pk)
    if request.method == 'GET':
        # Fix: a queryset must be serialized with many=True, otherwise DRF
        # raises when rendering the response.
        serializer = SkillSerializer(skills, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = SkillSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
@permission_classes((IsBelongToOwner,))
def profile_skill_details(request, pk, fk):
    """
    Retrieve (GET), update (PUT) or delete (DELETE) a single skill with
    id *fk* belonging to profile *pk*.

    Fix: the view implements a PUT branch but previously declared
    ['GET', 'POST', 'DELETE'], so every PUT got a 405 and the update
    branch was unreachable. The unused module-level queryset is gone too.
    """
    try:
        skill = Skill.objects.get(id=fk)
    except Skill.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = SkillSerializer(skill)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = SkillSerializer(skill, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        skill.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProgramViewSet(viewsets.ModelViewSet):
    """
    API endpoint intended to allow only retrieving a single program and
    listing all programs.
    """
    # NOTE(review): ModelViewSet also exposes create/update/delete routes,
    # contradicting the read-only intent stated above. ReadOnlyModelViewSet
    # would match the docstring -- confirm callers before changing.
    queryset = Program.objects.all()
serializer_class = ProgramSerializer | 38.642458 | 113 | 0.680642 |
7958388150d0469ed8580f8c3434b4c570e5d8d5 | 1,961 | py | Python | PyPoll.py | linearcoffeecup/election-analysis | d8ca539104045f2f3b3b52970cc9cef84f92a8a7 | [
"MIT"
] | null | null | null | PyPoll.py | linearcoffeecup/election-analysis | d8ca539104045f2f3b3b52970cc9cef84f92a8a7 | [
"MIT"
] | null | null | null | PyPoll.py | linearcoffeecup/election-analysis | d8ca539104045f2f3b3b52970cc9cef84f92a8a7 | [
"MIT"
] | null | null | null | import csv
import os
file_to_load = os.path.join("Resources", "election_results.csv")
file_to_save = os.path.join("analysis", "election_analysis.txt")
candidate_votes = {}
candidate_options = []
total_votes = 0
winning_candidate = ""
winning_count = 0
winning_percentage = 0
with open(file_to_load) as election_data:
file_reader = csv.reader(election_data)
headers = next(election_data)
for row in file_reader:
total_votes += 1
candidate_name = row[2]
if candidate_name not in candidate_options:
candidate_options.append(candidate_name)
candidate_votes[candidate_name] = 0
candidate_votes[candidate_name] += 1
with open(file_to_save, "w") as txt_file:
# Print the final vote count to the terminal.
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n")
print(election_results, end="")
# Save the final vote count to the text file.
txt_file.write(election_results)
for candidate_name in candidate_votes:
votes = candidate_votes[candidate_name]
votes_percent = float(votes)/float(total_votes)*100
candidate_results = (f"{candidate_name}: {votes_percent: .1f} ({votes: ,})\n")
print(candidate_results)
txt_file.write(candidate_results)
if (votes > winning_count) and (votes_percent > winning_percentage):
winning_count = votes
winning_percentage = votes_percent
winning_candidate = candidate_name
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
txt_file.write(winning_candidate_summary)
| 31.629032 | 86 | 0.63335 |
795839f89fc322aa4ca29b445f977df710bf7b5e | 10,410 | py | Python | rom_generator/storylineGenerator.py | ikarth/game-boy-rom-generator | 29576a4bbe87a0032f80967d4b740059a65ea5c9 | [
"MIT"
] | 3 | 2021-08-07T03:38:02.000Z | 2021-09-17T14:33:27.000Z | rom_generator/storylineGenerator.py | ikarth/game-boy-rom-generator | 29576a4bbe87a0032f80967d4b740059a65ea5c9 | [
"MIT"
] | null | null | null | rom_generator/storylineGenerator.py | ikarth/game-boy-rom-generator | 29576a4bbe87a0032f80967d4b740059a65ea5c9 | [
"MIT"
] | null | null | null | import argparse
import copy
import random
from rom_generator import script_functions as script
from rom_generator.generator import makeBasicProject, addSceneData, addSpriteSheet, makeSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, makeTrigger
def fullProject():
    """Assemble the sprite/scene data and return a project-generator callable."""
    # Every sprite sheet the generated scenes may reference; looked up by
    # name via findSpriteByName() below.
    sprite_sheet_data = [
      makeSpriteSheet('actor.png', name='actor', type='actor', frames=3),
      makeSpriteSheet('cat.png', name='cat', type='static', frames=1),
      makeSpriteSheet('checkbox.png', name='checkbox', type='actor', frames=3),
      makeSpriteSheet('connector.png', name='connector', type='animated', frames=2),
      makeSpriteSheet('dog.png', name='dog', type='static', frames=1),
      makeSpriteSheet('duck.png', name='duck', type='animated', frames=2),
      makeSpriteSheet('GreenBlock.png', name='GreenBlock', type='static', frames=1),
      makeSpriteSheet('ice.png', name='ice', type='static', frames=1),
      makeSpriteSheet('key_00.png', name='key_00', type='static', frames=1),
      makeSpriteSheet('MazeBlock.png', name='MazeBlock', type='static', frames=1),
      makeSpriteSheet('npc001.png', name='npc001', type='actor', frames=3),
      makeSpriteSheet('npc002.png', name='npc002', type='actor', frames=3),
      makeSpriteSheet('npc003.png', name='npc003', type='actor_animated', frames=6),
      makeSpriteSheet('player.png', name='player', type='actor_animated', frames=6),
      makeSpriteSheet('radio.png', name='radio', type='static', frames=1),
      makeSpriteSheet('rock.png', name='rock', type='static', frames=1),
      makeSpriteSheet('sage.png', name='sage', type='static', frames=1),
      makeSpriteSheet('savepoint.png', name='savepoint', type='animated', frames=2),
      makeSpriteSheet('signpost.png', name='signpost', type='static', frames=1),
      makeSpriteSheet('static.png', name='static', type='static', frames=1),
      makeSpriteSheet('tower.png', name='tower', type='static', frames=1),
      makeSpriteSheet('torch.png', name='torch', type='static', frames=1),
      makeSpriteSheet('fire.png', name='fire', type='animated', frames=4),
    ]
    def findSpriteByName(sprite_name):
        '''
        Returns first sprite that matches the name given.
        '''
        try:
            # Indexing [0] raises IndexError when no sheet matches; the
            # bare except below turns any failure into a warning + None.
            result = [s for s in sprite_sheet_data if (s['name'] == sprite_name)][0]
            if None == result:
                print(f"missing {sprite_name}")
            return result
        except:
            print(f"Missing {sprite_name}")
            return None
    def scene_gen_Cave_00002(callback):
        # Cave scene: two torch/fire pairs, a quiz-giving sage, and a
        # savepoint actor that asks a true/false question.
        actor_00 = makeActor(None, 4, 6, 'faceInteraction', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName("torch")["id"])
        actor_01 = makeActor(None, 4, 4, 'static', animate=True, moveSpeed=1, animSpeed=4, direction='down', script=[], sprite_id=findSpriteByName("fire")["id"])
        actor_02 = makeActor(None, 9, 7, 'static', direction='down', script=[], sprite_id=findSpriteByName("sage")["id"])
        actor_02['script'] = [
          script.text(text='In this game we are\' to learn about COVID\n'),
          script.text(text='Let us get\nstarted!'),
          script.text(text='Also, try going\nto the rock.'),
          script.setTrue(variable='7'),
          script.end()
        ]
        actor_03 = makeActor(None, 14, 6, 'faceInteraction', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName("torch")["id"])
        actor_04 = makeActor(None, 14, 4, 'static', animate=True, moveSpeed=1, animSpeed=4, direction='down', script=[], sprite_id=findSpriteByName("fire")["id"])
        actor_05 = makeActor(None, 14, 11, 'static', animate=True, moveSpeed=1, animSpeed=2, direction='down', script=[], sprite_id=findSpriteByName("savepoint")["id"])
        actor_05['script'] = [
          script.text(text='Is losing your\'taste a symptom of\ngetting COVID?'),
          script.choice(variable='11', trueText='True', falseText='False'),
          script.ifTrue(variable='11', children = {
            'true': [script.text(text='You got it right!'), script.text(text='Let us continue'), script.end()],
            'false': [script.text(text='You got it wrong!'), script.text(text='It is a new symptom.')]
          }),
          script.end()
        ]
        actor_list = [actor_00, actor_01, actor_02, actor_03, actor_04, actor_05]
        # NOTE(review): trigger_00 is built but never added to trigger_list,
        # so it has no effect -- confirm whether it was meant to be included.
        trigger_00 = makeTrigger('trigger_00', 9, 17, 2, 1)
        trigger_list = []
        # Packed per-tile collision bitmask for the cave background.
        collision_data_list = [0, 0, 0, 0, 0, 0, 0, 224, 255, 127, 2, 0, 36, 0, 64, 2, 0, 36, 0, 64, 2, 0, 36, 0, 64, 2, 0, 36, 0, 64, 2, 0, 36, 0, 64, 2, 0, 36, 0, 64, 254, 249, 7, 144, 0]
        gen_scene_bkg = makeBackground("cave.png")
        gen_scene_scn = makeScene("_gen_Cave", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label=scene_gen_Cave_00002)
        gen_scene_connections = []
        scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "tags": []}
        return scene_data
    def scene_gen_Stars_00004(callback):
        # Stars scene: a single dog actor asking another true/false question.
        actor_00 = makeActor(None, 15, 12, 'static', direction='down', script=[], sprite_id=findSpriteByName('dog')['id'])
        actor_00['script'] = [
          script.text(text='Does COVID make you dizzy?'),
          script.choice(variable='11', trueText='True', falseText='False'),
          script.ifTrue(variable='11', children = {
            'true': [script.text(text='You got it right!'), script.text(text='Let us continue'), script.end()],
            'false': [script.text(text='You got it wrong!'), script.text(text='It is a new symptom.')]
          }),
          script.end()
        ]
        actor_list = [actor_00]
        trigger_list = []
        # All-zero collision mask: the whole stars background is walkable.
        collision_data_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        gen_scene_bkg = makeBackground("stars.png")
        gen_scene_scn = makeScene("_gen_Stars", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label=scene_gen_Stars_00004)
        gen_scene_connections = []
        scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "tags": []}
        return scene_data
    def createConnection(scene, x, y):
        # NOTE(review): stashes its arguments in module globals; "connections"
        # is declared global but never assigned, and nothing visible here
        # reads sceneA/x2/y2 back -- looks like work-in-progress.
        global connections, sceneA, x2, y2
        #scene["triggers"].append(gen.makeTrigger(scene, 3, 3, 2, 1, []))
        sceneA = scene
        x2 = x
        y2 = y
    def storylineGenerator():
        """
        Create an empty world as an example to build future projects from.
        """
        # Set up a barebones project
        project = makeBasicProject()
        # Add a background image
        default_bkg = makeBackground("placeholder.png", "placeholder")
        project.backgrounds.append(default_bkg)
        player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated", frames=6)
        project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
        #a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
        #main_actor = makeActor(player_sprite_sheet, 10, 11, "static")
        #first_to_second = addSymmetricSceneConnections(project, first_scene, second_scene, "right", None)
        # Build the two scenes defined above.
        first_scene = scene_gen_Cave_00002(None)
        second_scene = scene_gen_Stars_00004(None)
        createConnection(first_scene,10, 12)
        # Create sprite sheet for the player sprite
        project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
        # Get information about the background
        #bkg_x = default_bkg["imageWidth"]
        #bkg_y = default_bkg["imageHeight"]
        #bkg_width = default_bkg["width"]
        #bkg_height = default_bkg["height"]
        # add a sprite to indicate the location of a doorway
        # a better way to do this in the actual levels is to alter the background image instead
        #doorway_sprite = addSpriteSheet(project, "tower.png", "tower", "static")
        # Add scene to project
        for element_sprite in sprite_sheet_data:
            project.spriteSheets.append(element_sprite)
        scene_data_list = [first_scene, second_scene]
        for sdata in scene_data_list:
            # uncomment when you have generator.translateReferences: generator.addSceneData(project, generator.translateReferences(sdata, scene_data_list))
            addSceneData(project, sdata)
            project.backgrounds.append(sdata["background"])
        # Add some music
        project.music.append(makeMusic("template", "template.mod"))
        # Set the starting scene
        #project.settings["startSceneId"] = project.scenes[0]["id"]
        return project
    def getProject():
        # NOTE(review): "project" is local to storylineGenerator(), so calling
        # this raises NameError; it also isn't returned -- dead code?
        return project
    # The caller invokes the returned function to actually build the project.
    return storylineGenerator
# Utilities
class bcolors:
    """ANSI escape codes for colouring and styling terminal output."""
    # Foreground colours (bright variants).
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    # Reset all attributes back to the terminal default.
    ENDC = "\033[0m"
    # Text styles.
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
### Run the generator
if __name__ == '__main__':
    # CLI entry point: parse the destination folder, then build the
    # GB Studio project and write it to disk.
    parser = argparse.ArgumentParser(description="Generate a Game Boy ROM via a GB Studio project file.")
    parser.add_argument('--destination', '-d', type=str, help="destination folder name", default="../gbprojects/projects/storyline_01")
    args = parser.parse_args()
    initializeGenerator()
    # fullProject() returns a generator callable; invoking it builds the project.
    gen_func = fullProject()
    project = gen_func()
    writeProjectToDisk(project, output_path = args.destination)
| 55.079365 | 415 | 0.608453 |
79583a54d49a6b25b409ec17e270b6fb7970ede5 | 16,484 | py | Python | rtv/mime_parsers.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | null | null | null | rtv/mime_parsers.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | null | null | null | rtv/mime_parsers.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | 2 | 2018-05-01T21:40:39.000Z | 2018-05-02T20:43:35.000Z | import re
import logging
import mimetypes
import requests
from bs4 import BeautifulSoup
_logger = logging.getLogger(__name__)
class BaseMIMEParser(object):
"""
BaseMIMEParser can be sub-classed to define custom handlers for determining
the MIME type of external urls.
"""
pattern = re.compile(r'.*$')
@staticmethod
def get_mimetype(url):
"""
Guess based on the file extension.
Args:
url (text): Web url that was linked to by a reddit submission.
Returns:
modified_url (text): The url (or filename) that will be used when
constructing the command to run.
content_type (text): The mime-type that will be used when
constructing the command to run. If the mime-type is unknown,
return None and the program will fallback to using the web
browser.
"""
filename = url.split('?')[0]
filename = filename.split('#')[0]
content_type, _ = mimetypes.guess_type(filename)
return url, content_type
class OpenGraphMIMEParser(BaseMIMEParser):
"""
Open graph protocol is used on many web pages.
<meta property="og:image" content="https://xxxx.jpg?ig_cache_key=xxxxx" />
<meta property="og:video:secure_url" content="https://xxxxx.mp4" />
If the page is a video page both of the above tags will be present and
priority is given to video content.
see http://ogp.me
"""
pattern = re.compile(r'.*$')
@staticmethod
def get_mimetype(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for og_type in ['video', 'image']:
prop = 'og:' + og_type + ':secure_url'
tag = soup.find('meta', attrs={'property': prop})
if not tag:
prop = 'og:' + og_type
tag = soup.find('meta', attrs={'property': prop})
if tag:
return BaseMIMEParser.get_mimetype(tag.get('content'))
return url, None
class GfycatMIMEParser(BaseMIMEParser):
"""
Gfycat provides a primitive json api to generate image links. URLs can be
downloaded as either gif, webm, or mjpg. Webm was selected because it's
fast and works with VLC.
https://gfycat.com/api
https://gfycat.com/UntidyAcidicIberianemeraldlizard -->
https://giant.gfycat.com/UntidyAcidicIberianemeraldlizard.webm
"""
pattern = re.compile(r'https?://(www\.)?gfycat\.com/[^.]+$')
@staticmethod
def get_mimetype(url):
parts = url.replace('gifs/detail/', '').split('/')
api_url = '/'.join(parts[:-1] + ['cajax', 'get'] + parts[-1:])
resp = requests.get(api_url)
image_url = resp.json()['gfyItem']['webmUrl']
return image_url, 'video/webm'
class YoutubeMIMEParser(BaseMIMEParser):
"""
Youtube videos can be streamed with vlc or downloaded with youtube-dl.
Assign a custom mime-type so they can be referenced in mailcap.
"""
pattern = re.compile(
r'(?:https?://)?(m\.)?(?:youtu\.be/|(?:www\.)?youtube\.com/watch'
r'(?:\.php)?\'?.*v=)([a-zA-Z0-9\-_]+)')
@staticmethod
def get_mimetype(url):
return url, 'video/x-youtube'
class GifvMIMEParser(BaseMIMEParser):
"""
Special case for .gifv, which is a custom video format for imgur serves
as html with a special <video> frame. Note that attempting for download as
.webm also returns this html page. However, .mp4 appears to return the raw
video file.
"""
pattern = re.compile(r'.*[.]gifv$')
@staticmethod
def get_mimetype(url):
modified_url = url[:-4] + 'mp4'
return modified_url, 'video/mp4'
class RedditUploadsMIMEParser(BaseMIMEParser):
"""
Reddit uploads do not have a file extension, but we can grab the mime-type
from the page header.
"""
pattern = re.compile(r'https://i\.reddituploads\.com/.+$')
@staticmethod
def get_mimetype(url):
page = requests.head(url)
content_type = page.headers.get('Content-Type', '')
content_type = content_type.split(';')[0] # Strip out the encoding
return url, content_type
class RedditVideoMIMEParser(BaseMIMEParser):
"""
Reddit hosted videos/gifs.
Media uses MPEG-DASH format (.mpd)
"""
pattern = re.compile(r'https://v\.redd\.it/.+$')
@staticmethod
def get_mimetype(url):
request_url = url + '/DASHPlaylist.mpd'
page = requests.get(request_url)
soup = BeautifulSoup(page.content, 'html.parser')
if not soup.find('representation', attrs={'mimetype': 'audio/mp4'}):
reps = soup.find_all('representation',
attrs={'mimetype': 'video/mp4'})
rep = sorted(reps, reverse=True,
key=lambda t: int(t.get('bandwidth')))[0]
return url + '/' + rep.find('baseurl').text, 'video/mp4'
return request_url, 'video/x-youtube'
class ImgurApiMIMEParser(BaseMIMEParser):
"""
Imgur now provides a json API exposing its entire infrastructure. Each Imgur
page has an associated hash and can either contain an album, a gallery,
or single image.
The default client token for RTV is shared among users and allows a maximum
global number of requests per day of 12,500. If we find that this limit is
not sufficient for all of rtv's traffic, this method will be revisited.
Reference:
https://apidocs.imgur.com
"""
CLIENT_ID = None
pattern = re.compile(
r'https?://(w+\.)?(m\.)?imgur\.com/'
r'((?P<domain>a|album|gallery)/)?(?P<hash>[a-zA-Z0-9]+)$')
@classmethod
def get_mimetype(cls, url):
endpoint = 'https://api.imgur.com/3/{domain}/{page_hash}'
headers = {'authorization': 'Client-ID {0}'.format(cls.CLIENT_ID)}
m = cls.pattern.match(url)
page_hash = m.group('hash')
if m.group('domain') in ('a', 'album'):
domain = 'album'
else:
# This could be a gallery or a single image, but there doesn't
# seem to be a way to reliably distinguish between the two.
# Assume a gallery, which appears to be more common, and fallback
# to an image request upon failure.
domain = 'gallery'
if not cls.CLIENT_ID:
return cls.fallback(url, domain)
api_url = endpoint.format(domain=domain, page_hash=page_hash)
r = requests.get(api_url, headers=headers)
if domain == 'gallery' and r.status_code != 200:
# Not a gallery, try to download using the image endpoint
api_url = endpoint.format(domain='image', page_hash=page_hash)
r = requests.get(api_url, headers=headers)
if r.status_code != 200:
_logger.warning('Imgur API failure, status %s', r.status_code)
return cls.fallback(url, domain)
data = r.json().get('data')
if not data:
_logger.warning('Imgur API failure, resp %s', r.json())
return cls.fallback(url, domain)
if 'images' in data and len(data['images']) > 1:
# TODO: handle imgur albums with mixed content, i.e. jpeg and gifv
link = ' '.join([d['link'] for d in data['images'] if not d['animated']])
mime = 'image/x-imgur-album'
else:
data = data['images'][0] if 'images' in data else data
# this handles single image galleries
link = data['mp4'] if data['animated'] else data['link']
mime = 'video/mp4' if data['animated'] else data['type']
link = link.replace('http://', 'https://')
return link, mime
@classmethod
def fallback(cls, url, domain):
"""
Attempt to use one of the scrapers if the API doesn't work
"""
if domain == 'album':
return ImgurScrapeAlbumMIMEParser.get_mimetype(url)
else:
return ImgurScrapeMIMEParser.get_mimetype(url)
class ImgurScrapeMIMEParser(BaseMIMEParser):
    """
    The majority of imgur links don't point directly to the image, so we need
    to open the provided url and scrape the page for the link.
    Scrape the actual image url from an imgur landing page. Imgur intentionally
    obscures this on most reddit links in order to draw more traffic for their
    advertisements.
    There are a couple of <meta> tags that supply the relevant info:
        <meta name="twitter:image" content="https://i.imgur.com/xrqQ4LEh.jpg">
        <meta property="og:image" content="http://i.imgur.com/xrqQ4LE.jpg?fb">
        <link rel="image_src" href="http://i.imgur.com/xrqQ4LE.jpg">
    """
    pattern = re.compile(r'https?://(w+\.)?(m\.)?imgur\.com/[^.]+$')
    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        tag = soup.find('meta', attrs={'name': 'twitter:image'})
        if tag:
            url = tag.get('content')
            # The scraped url may itself be a .gifv page; delegate to that
            # parser so it resolves to the raw mp4.
            if GifvMIMEParser.pattern.match(url):
                return GifvMIMEParser.get_mimetype(url)
        # Fall back to extension-based guessing (on the original url when
        # no meta tag was found).
        return BaseMIMEParser.get_mimetype(url)
class ImgurScrapeAlbumMIMEParser(BaseMIMEParser):
    """
    Imgur albums can contain several images, which need to be scraped from the
    landing page. Assumes the following html structure:
        <div class="post-image">
            <a href="//i.imgur.com/L3Lfp1O.jpg" class="zoom">
                <img class="post-image-placeholder"
                     src="//i.imgur.com/L3Lfp1Og.jpg" alt="Close up">
                <img class="js-post-image-thumb"
                     src="//i.imgur.com/L3Lfp1Og.jpg" alt="Close up">
            </a>
        </div>
    """
    pattern = re.compile(r'https?://(w+\.)?(m\.)?imgur\.com/a(lbum)?/[^.]+$')
    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        urls = []
        for div in soup.find_all('div', class_='post-image'):
            img = div.find('img')
            src = img.get('src') if img else None
            if src:
                # src is protocol-relative ("//i.imgur.com/..."): prefix it.
                urls.append('http:{0}'.format(src))
        if urls:
            # Space-separated list of image urls, tagged with the custom
            # album mime-type understood by the viewer.
            return " ".join(urls), 'image/x-imgur-album'
        return url, None
class InstagramMIMEParser(OpenGraphMIMEParser):
    """
    Instagram uses the Open Graph protocol
    """
    # Matches individual post pages: instagram.com/p/<id> or instagr.am/p/<id>.
    pattern = re.compile(r'https?://(www\.)?instagr((am\.com)|\.am)/p/[^.]+$')
class StreamableMIMEParser(OpenGraphMIMEParser):
    """
    Streamable uses the Open Graph protocol
    """
    # Matches streamable.com clip pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?streamable\.com/[^.]+$')
class TwitchMIMEParser(BaseMIMEParser):
    """
    Non-streaming video clips hosted by twitch.tv.

    The clip page exposes a thumbnail through the twitter:image meta tag;
    the corresponding mp4 lives at the same path with the trailing
    "-preview.jpg" replaced by ".mp4".
    """
    pattern = re.compile(r'https?://clips\.?twitch\.tv/[^.]+$')

    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        tag = soup.find('meta', attrs={'name': 'twitter:image'})
        # Fix: guard against pages without the expected meta tag, which
        # previously raised AttributeError on tag.get().
        thumbnail = tag.get('content') if tag else None
        suffix = '-preview.jpg'
        if thumbnail and thumbnail.endswith(suffix):
            # Only strip the trailing suffix; str.replace() would also
            # rewrite an earlier occurrence inside the path.
            return thumbnail[:-len(suffix)] + '.mp4', 'video/mp4'
        return url, None
class OddshotMIMEParser(OpenGraphMIMEParser):
    """
    Oddshot uses the Open Graph protocol
    """
    # Matches both the /shot/<id> and shortened /s/<id> clip urls.
    pattern = re.compile(r'https?://oddshot\.tv/s(hot)?/[^.]+$')
class VidmeMIMEParser(BaseMIMEParser):
    """
    Vidme provides a json api.
    https://doc.vid.me
    """
    pattern = re.compile(r'https?://(www\.)?vid\.me/[^.]+$')
    @staticmethod
    def get_mimetype(url):
        # Resolve the page url to a direct media url via the lookup endpoint.
        resp = requests.get('https://api.vid.me/videoByUrl?url=' + url)
        # 'status' in the payload indicates whether the lookup succeeded.
        if resp.status_code == 200 and resp.json()['status']:
            return resp.json()['video']['complete_url'], 'video/mp4'
        return url, None
class LiveleakMIMEParser(BaseMIMEParser):
    """
    https://www.liveleak.com/view?i=12c_3456789
    <video>
      <source src="https://cdn.liveleak.com/..mp4" res="HD" type="video/mp4">
      <source src="https://cdn.liveleak.com/..mp4" res="SD" type="video/mp4">
    </video>
    Sometimes only one video source is available
    """
    pattern = re.compile(r'https?://((www|m)\.)?liveleak\.com/view\?i=\w+$')

    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')

        urls = []
        for vid in soup.find_all('video'):
            # Fix: the keyword is ``attrs`` -- passing ``attr=`` made
            # BeautifulSoup filter on an html attribute literally named
            # "attr", so the HD source was never selected.
            source = vid.find('source', attrs={'res': 'HD'})
            source = source or vid.find('source')
            if source:
                urls.append((source.get('src'), source.get('type')))

        # TODO: Handle pages with multiple videos
        if urls:
            return urls[0]

        def filter_iframe(t):
            # Use .get() so iframes without a src attribute don't raise.
            return t.name == 'iframe' and 'youtube.com' in (t.get('src') or '')

        iframe = soup.find_all(filter_iframe)
        if iframe:
            return YoutubeMIMEParser.get_mimetype(iframe[0]['src'].strip('/'))
        return url, None
class ClippitUserMIMEParser(BaseMIMEParser):
    """
    Clippit pages embed the clip in a JWPlayer container whose data
    attributes carry the media urls.
    """
    pattern = re.compile(r'https?://(www\.)?clippituser\.tv/c/.+$')

    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        tag = soup.find(id='jwplayer-container')
        if tag is None:
            # Fix: unexpected page layout previously crashed on tag.get();
            # fall back to the browser instead.
            return url, None
        # Prefer the HD stream but fall back to SD when it's missing; the
        # original built both keys yet only ever consulted HD.
        video = tag.get('data-hd-file') or tag.get('data-sd-file')
        if video:
            return video, 'video/mp4'
        return url, None
class GifsMIMEParser(OpenGraphMIMEParser):
    """
    Gifs.com uses the Open Graph protocol
    """
    # Matches gifs.com clip pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?gifs\.com/gif/.+$')
class GiphyMIMEParser(OpenGraphMIMEParser):
    """
    Giphy.com uses the Open Graph protocol
    """
    # Matches giphy.com gif pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?giphy\.com/gifs/.+$')
class ImgtcMIMEParser(OpenGraphMIMEParser):
    """
    imgtc.com uses the Open Graph protocol
    """
    # Matches imgtc.com clip pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?imgtc\.com/w/.+$')
class ImgflipMIMEParser(OpenGraphMIMEParser):
    """
    imgflip.com uses the Open Graph protocol
    """
    # Matches imgflip.com image pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?imgflip\.com/i/.+$')
class LivememeMIMEParser(OpenGraphMIMEParser):
    """
    livememe.com uses the Open Graph protocol
    """
    # Matches livememe.com image pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?livememe\.com/[^.]+$')
class MakeamemeMIMEParser(OpenGraphMIMEParser):
    """
    makeameme.com uses the Open Graph protocol
    """
    # Matches makeameme.org meme pages; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?makeameme\.org/meme/.+$')
class FlickrMIMEParser(OpenGraphMIMEParser):
    """
    Flickr uses the Open Graph protocol
    """
    # TODO: handle albums/photosets (https://www.flickr.com/services/api)
    # Matches single-photo pages only; resolution happens in the base class.
    pattern = re.compile(r'https?://(www\.)?flickr\.com/photos/[^/]+/[^/]+/?$')
class WorldStarHipHopMIMEParser(BaseMIMEParser):
    """
    <video>
      <source src="https://hw-mobile.worldstarhiphop.com/..mp4" type="video/mp4">
      <source src="" type="video/mp4">
    </video>
    Sometimes only one video source is available
    """
    pattern = re.compile(r'https?://((www|m)\.)?worldstarhiphop\.com/videos/video.php\?v=\w+$')

    @staticmethod
    def get_mimetype(url):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')

        def filter_source(t):
            # Fix: use .get() so <source> tags missing src/type attributes
            # don't raise KeyError from inside find_all().
            return (t.name == 'source' and t.get('src')
                    and t.get('type') == 'video/mp4')

        source = soup.find_all(filter_source)
        if source:
            return source[0]['src'], 'video/mp4'

        def filter_iframe(t):
            # Same guard for iframes without a src attribute.
            return t.name == 'iframe' and 'youtube.com' in (t.get('src') or '')

        iframe = soup.find_all(filter_iframe)
        if iframe:
            return YoutubeMIMEParser.get_mimetype(iframe[0]['src'])
        return url, None
# Parsers should be listed in the order they will be checked
parsers = [
ClippitUserMIMEParser,
OddshotMIMEParser,
StreamableMIMEParser,
VidmeMIMEParser,
InstagramMIMEParser,
GfycatMIMEParser,
ImgurApiMIMEParser,
RedditUploadsMIMEParser,
RedditVideoMIMEParser,
YoutubeMIMEParser,
LiveleakMIMEParser,
TwitchMIMEParser,
FlickrMIMEParser,
GifsMIMEParser,
GiphyMIMEParser,
ImgtcMIMEParser,
ImgflipMIMEParser,
LivememeMIMEParser,
MakeamemeMIMEParser,
WorldStarHipHopMIMEParser,
GifvMIMEParser,
BaseMIMEParser]
| 31.883946 | 95 | 0.607802 |
79583b65bc32731cf10fef4385086438d3bfe149 | 2,860 | py | Python | vpc_core_infra/vpcx_cdk/vpc_update_handler.py | aws-samples/amz-vpc-provisioning-api-sls | d6779d24f8f0aa46e53cee17b7d94af401daeb18 | [
"MIT-0"
] | null | null | null | vpc_core_infra/vpcx_cdk/vpc_update_handler.py | aws-samples/amz-vpc-provisioning-api-sls | d6779d24f8f0aa46e53cee17b7d94af401daeb18 | [
"MIT-0"
] | null | null | null | vpc_core_infra/vpcx_cdk/vpc_update_handler.py | aws-samples/amz-vpc-provisioning-api-sls | d6779d24f8f0aa46e53cee17b7d94af401daeb18 | [
"MIT-0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# pylint: disable=line-too-long
"""VPC Update handler"""
import json
from vpc_core_infra.vpcx_cdk.vpc_base import VpcHandler, METADATA_TEMPLATE_BUCKET, STATIC_CF_TEMPLATE_BUCKET
from vpc_core_infra.vpcx_cdk import cdk_orchestrator
from vpc_core_infra.vpcx_cdk.vpc_context import VpcContext
class UpdateVpcHandler(VpcHandler):
    """Handle a VPC update request: validate it, rebuild the CloudFormation
    template from stored context, and kick off a stack update."""

    @staticmethod
    def validate_request(request_body):
        """Validate the request payload and load the stored VPC context.

        Returns a pair: ``(True, VpcContext)`` on success, or
        ``(False, error-response dict)`` when a field is missing or no
        stored context exists for the given account/VPC.
        """
        account_alias = request_body.get('account_alias', None)
        vpcx_name = request_body.get('vpcx_name', None)
        if account_alias is None or vpcx_name is None:
            return False, {
                'statusCode': 400,
                'body': "Missing a required field"
            }
        context_data = cdk_orchestrator.get_context_data_from_metadata_store(
            METADATA_TEMPLATE_BUCKET, account_alias, vpcx_name)
        if context_data is None:
            return False, {
                'statusCode': 404,
                'body': "No VPCx VPC found."
            }
        return True, VpcContext.init_from_storage(context_data)

    def process(self, event):
        """Process the API event and return an API Gateway style response."""
        auth_error = self.auth(event)
        if auth_error:
            return auth_error
        payload = json.loads(event['body'])
        ok, outcome = self.validate_request(payload)
        if not ok:
            # `outcome` is already a formatted error response.
            return outcome
        vpc_context = outcome
        found_metadata, metadata_session = self.get_cross_account_session('METADATA_ACCOUNT_ALIAS')
        if not found_metadata:
            return {
                'statusCode': 404,
                'body': 'No metadata account found.'
            }
        found_target, target_session = self.get_cross_account_session(payload["account_alias"], vpc_context.region)
        if not found_target:
            return {
                'statusCode': 404,
                'body': 'No account found.'
            }
        # Pull region-specific external configuration, then regenerate the
        # CloudFormation template from the stored context.
        external_configs = cdk_orchestrator.get_external_configs(
            payload.get('region'), STATIC_CF_TEMPLATE_BUCKET, metadata_session)
        stack_name, cf_template = cdk_orchestrator.build_cf_template(vpc_context, external_configs)
        # Kick off the stack update in the target account.
        return self.update_cf_stack(
            cf_template,
            stack_name,
            target_session.client('cloudformation'),
            vpc_context
        )
79583be703f29a897f912085f0a239bfa18c8862 | 582 | py | Python | celery/wip/j.py | noahwilliamsson/protokollen | feb6e4bb5934d83646f9954ad24b3e7670d18817 | [
"BSD-2-Clause"
] | 1 | 2019-12-11T16:54:38.000Z | 2019-12-11T16:54:38.000Z | celery/wip/j.py | noahwilliamsson/protokollen | feb6e4bb5934d83646f9954ad24b3e7670d18817 | [
"BSD-2-Clause"
] | null | null | null | celery/wip/j.py | noahwilliamsson/protokollen | feb6e4bb5934d83646f9954ad24b3e7670d18817 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import json
import sys
# http://docs.python.org/2/library/urlparse.html
from urlparse import urlparse
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], "<file.json>"
raise SystemExit
data = open(sys.argv[1]).read()
har = json.loads(data)
domain_map = {}
for e in har['log']['entries']:
url = e['request']['url']
o = urlparse(url)
# Create list at key if not already present
domain_map[o.netloc] = domain_map.get(o.netloc, [])
domain_map[o.netloc].append(url)
for d, list in domain_map.iteritems():
print d
for u in list:
print "\t", u[:30]
| 19.4 | 52 | 0.675258 |
79583c47d8e3d6e3af5a34c380906f513f94f2bd | 4,008 | py | Python | tests/unit/states/win_snmp_test.py | fictivekin/salt | f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00 | [
"Apache-2.0"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | tests/unit/states/win_snmp_test.py | fictivekin/salt | f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00 | [
"Apache-2.0"
] | null | null | null | tests/unit/states/win_snmp_test.py | fictivekin/salt | f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00 | [
"Apache-2.0"
] | 2 | 2017-01-05T16:14:59.000Z | 2019-01-31T23:15:25.000Z | # -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows SNMP Module 'state.win_snmp'
:platform: Windows
:maturity: develop
versionadded:: Nitrogen
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Libs
from salt.states import win_snmp
import salt.ext.six as six
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
# Make the test-suite root importable when this file is run directly.
ensure_in_syspath('../../')
# Globals
# Give the module-under-test empty loader dicts so patch.dict() can inject
# mocked execution-module functions and minion opts in each test.
win_snmp.__salt__ = {}
win_snmp.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinSnmpTestCase(TestCase):
    """Unit tests for the salt.states.win_snmp state module."""

    def test_agent_settings(self):
        """Managing sysContact/sysLocation/sysServices: no-op when values match."""
        kwargs = {'name': 'agent-settings', 'contact': 'TestContact',
                  'location': 'TestLocation', 'services': ['Internet']}
        ret = {
            'name': kwargs['name'],
            'changes': {},
            'comment': 'Agent settings already contain the provided values.',
            'result': True
        }
        # Everything except 'name' mirrors what the execution module reports.
        current = {key: value for (key, value) in six.iteritems(kwargs) if key != 'name'}
        mock_get = MagicMock(return_value=current)
        mock_set = MagicMock(return_value=True)
        with patch.dict(win_snmp.__salt__,
                        {'win_snmp.get_agent_settings': mock_get,
                         'win_snmp.set_agent_settings': mock_set}):
            with patch.dict(win_snmp.__opts__, {'test': False}):
                self.assertEqual(win_snmp.agent_settings(**kwargs), ret)

    def test_auth_traps_enabled(self):
        """Managing authentication traps: applies the change, honors test mode."""
        kwargs = {'name': 'auth-traps', 'status': True}
        ret = {
            'name': kwargs['name'],
            'changes': {
                'old': False,
                'new': True
            },
            'comment': 'Set EnableAuthenticationTraps to contain the provided value.',
            'result': True
        }
        mock_get = MagicMock(return_value=False)
        mock_set = MagicMock(return_value=True)
        with patch.dict(win_snmp.__salt__,
                        {'win_snmp.get_auth_traps_enabled': mock_get,
                         'win_snmp.set_auth_traps_enabled': mock_set}):
            with patch.dict(win_snmp.__opts__, {'test': False}):
                self.assertEqual(win_snmp.auth_traps_enabled(**kwargs), ret)
            with patch.dict(win_snmp.__opts__, {'test': True}):
                # In test mode the state reports the pending change only.
                ret['comment'] = 'EnableAuthenticationTraps will be changed.'
                ret['result'] = None
                self.assertEqual(win_snmp.auth_traps_enabled(**kwargs), ret)

    def test_community_names(self):
        """Managing community names: no-op when the names already match."""
        kwargs = {'name': 'community-names',
                  'communities': {'TestCommunity': 'Read Create'}}
        ret = {
            'name': kwargs['name'],
            'changes': {},
            'comment': 'Communities already contain the provided values.',
            'result': True
        }
        mock_get = MagicMock(return_value=kwargs['communities'])
        mock_set = MagicMock(return_value=True)
        with patch.dict(win_snmp.__salt__,
                        {'win_snmp.get_community_names': mock_get,
                         'win_snmp.set_community_names': mock_set}):
            with patch.dict(win_snmp.__opts__, {'test': False}):
                self.assertEqual(win_snmp.community_names(**kwargs), ret)
# Allow running this test module directly via Salt's legacy test runner.
if __name__ == '__main__':
from integration import run_tests  # pylint: disable=import-error
run_tests(WinSnmpTestCase, needs_daemon=False)
| 37.457944 | 96 | 0.608782 |
79583d4dbed315f4064558677b0c97ed5bcda5dd | 2,920 | py | Python | TvTimeWrapper/TvTimeUtils.py | Aymendje/Trakt2TvTime | 9cfc3c68ec43ede81e952c40a37b8cf35057a210 | [
"MIT"
] | 1 | 2022-01-06T18:51:48.000Z | 2022-01-06T18:51:48.000Z | TvTimeWrapper/TvTimeUtils.py | Aymendje/Trakt2TvTime | 9cfc3c68ec43ede81e952c40a37b8cf35057a210 | [
"MIT"
] | null | null | null | TvTimeWrapper/TvTimeUtils.py | Aymendje/Trakt2TvTime | 9cfc3c68ec43ede81e952c40a37b8cf35057a210 | [
"MIT"
] | null | null | null | from Users.UsersManager import TvTimeUser
from time import sleep
from typing import Dict, List, Any
import requests
import logging
from bs4 import BeautifulSoup
from TvTimeWrapper import TvTimeLogin
# Present a desktop Chrome User-Agent so TV Time serves its regular web pages.
HEADERS={ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36' }
def update_tvtime_cookies(auth: dict, cookies: Any) -> None:
    """Refresh the stored session cookies in-place.

    For each tracked cookie name, take the value from ``cookies`` when the
    server sent a replacement, otherwise keep the value already in ``auth``.
    """
    for name in ('symfony', 'tvstRemember'):
        auth[name] = cookies.get(name, auth[name])
def get(url: str, update_session=True, user: TvTimeLogin = None, throw : bool = False) -> requests.Response:
    """GET ``url`` with the user's session cookies.

    On any failure the error is logged, the session is refreshed, and the
    request is retried exactly once (the retry runs with ``throw=True``).
    """
    try:
        response = requests.get(url, headers=HEADERS, cookies=user and user.Authorisation)
        ThrowOnError(response, user, update_session)
        return response
    except Exception as exc:
        CheckForRetry(exc, throw, user)
        return get(url=url, update_session=update_session, user=user, throw=True)
def post(url: str, data: Dict[str, Any], update_session=True, user: TvTimeLogin = None, throw : bool = False) -> requests.Response:
    """POST ``data`` to ``url`` with the user's session cookies.

    Logs and retries once on failure, same policy as :func:`get`.
    """
    try:
        response = requests.post(url, data=data, headers=HEADERS, cookies=user and user.Authorisation)
        ThrowOnError(response, user, update_session)
        return response
    except Exception as exc:
        CheckForRetry(exc, throw, user)
        return post(url=url, data=data, update_session=update_session, user=user, throw=True)
def put(url: str, data: Dict[str, Any], update_session=True, user: TvTimeLogin = None, throw : bool = False) -> requests.Response:
    """PUT ``data`` to ``url`` with the user's session cookies.

    Logs and retries once on failure, same policy as :func:`get`.
    """
    try:
        response = requests.put(url, data=data, headers=HEADERS, cookies=user and user.Authorisation)
        ThrowOnError(response, user, update_session)
        return response
    except Exception as exc:
        CheckForRetry(exc, throw, user)
        return put(url=url, data=data, update_session=update_session, user=user, throw=True)
def delete(url: str, data: Dict[str, Any], update_session=True, user: TvTimeLogin = None, throw : bool = False) -> requests.Response:
    """DELETE ``url`` (with body ``data``) using the user's session cookies.

    Logs and retries once on failure, same policy as :func:`get`.
    """
    try:
        response = requests.delete(url, data=data, headers=HEADERS, cookies=user and user.Authorisation)
        ThrowOnError(response, user, update_session)
        return response
    except Exception as exc:
        CheckForRetry(exc, throw, user)
        return delete(url=url, data=data, update_session=update_session, user=user, throw=True)
def ThrowOnError(resp: requests.Response, user: TvTimeUser, update_session : bool):
    """Raise for HTTP error statuses; on success optionally refresh cookies."""
    resp.raise_for_status()
    if not update_session:
        return
    update_tvtime_cookies(user.Authorisation, resp.cookies)
def CheckForRetry(err: Exception, throw: bool, user: "TvTimeUser"):
    """Log a request failure, then either re-raise it or re-login for a retry.

    :param err: the exception raised by the failed HTTP call
    :param throw: when True, propagate ``err`` instead of recovering
    :param user: the TV Time account whose session should be refreshed
    :raises Exception: re-raises ``err`` when ``throw`` is True
    """
    # Bug fix: the original format string only contained {0}, so the second
    # .format() argument (`throw`) was silently dropped from the log message.
    logging.error("Exception : {0}. Will throw : {1}".format(err, throw), exc_info=True)
    if throw:
        raise err
    # Back off briefly, refresh the session, then give the server a moment
    # before the caller retries the request.
    sleep(4.5)
    TvTimeLogin.Login(user)
    sleep(0.5)
| 42.941176 | 133 | 0.707534 |
79583e5675327f7abdbe26f15ba7daf7f52bbe1e | 2,607 | py | Python | utils/gen_link_script.py | ontio/libcxx-mirror | 4b4f32ea383deb28911f5618126c6ea6c110b5e4 | [
"Apache-2.0"
] | null | null | null | utils/gen_link_script.py | ontio/libcxx-mirror | 4b4f32ea383deb28911f5618126c6ea6c110b5e4 | [
"Apache-2.0"
] | 1 | 2019-04-21T16:53:33.000Z | 2019-04-21T17:15:25.000Z | utils/gen_link_script.py | ontio/libcxx-mirror | 4b4f32ea383deb28911f5618126c6ea6c110b5e4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import sys
def print_and_exit(msg):
    """Write ``msg`` to stderr (newline-terminated) and exit with status 1."""
    sys.stderr.write(msg + '\n')
    sys.exit(1)
def usage_and_exit():
    """Print the one-line CLI usage string and terminate with status 1."""
    print_and_exit("Usage: ./gen_link_script.py [--help] [--dryrun] <path/to/libcxx.so> <public_libs>...")
# NOTE(review): the help text is a runtime string, so it is left byte-for-byte
# as found; its original internal indentation appears to have been lost in
# transit — confirm against upstream before reformatting.
def help_and_exit():
help_msg = \
"""Usage
gen_link_script.py [--help] [--dryrun] <path/to/libcxx.so> <public_libs>...
Generate a linker script that links libc++ to the proper ABI library.
The script replaces the specified libc++ symlink.
An example script for c++abi would look like "INPUT(libc++.so.1 -lc++abi)".
Arguments
<path/to/libcxx.so> - The top level symlink to the versioned libc++ shared
library. This file is replaced with a linker script.
<public_libs> - List of library names to include in linker script.
Exit Status:
0 if OK,
1 if the action failed.
"""
print_and_exit(help_msg)
def parse_args():
    """Parse sys.argv into ``(dryrun, symlink_file, public_libs)``.

    Exits (via the usage/help helpers) when arguments are missing or
    ``--help`` is given.
    """
    args = sys.argv[1:]
    if not args:
        usage_and_exit()
    if args[0] == '--help':
        help_and_exit()
    dryrun = args[0] == '--dryrun'
    if dryrun:
        args = args[1:]
    if len(args) < 2:
        usage_and_exit()
    return dryrun, args[0], args[1:]
def main():
    """Replace the top-level libc++ symlink with a linker script that names
    the versioned library plus the public ABI libraries."""
    dryrun, symlink_file, public_libs = parse_args()
    # The given libc++.so must be a symlink; refuse to clobber a real file.
    if not os.path.islink(symlink_file):
        print_and_exit("symlink file %s is not a symlink" % symlink_file)
    # The symlink target tells us which versioned libc++ to reference.
    target = os.readlink(symlink_file)
    linker_flags = ['-l%s' % l for l in public_libs]
    # Build the script and report what will be written where.
    script = "INPUT(%s %s)" % (target, ' '.join(linker_flags))
    print("GENERATING SCRIPT: '%s' as file %s" % (script, symlink_file))
    if not dryrun:
        # Swap the symlink for a plain file containing the linker script.
        os.unlink(symlink_file)
        with open(symlink_file, 'w') as f:
            f.write(script + "\n")
# Script entry point guard.
if __name__ == '__main__':
main()
| 31.035714 | 106 | 0.616417 |
79583e68ac6a5d0eb83125be6819a88a0c28abf0 | 8,797 | py | Python | napari/layers/utils/_color_encoding.py | jojoelfe/napari | b52a136dad392c091b0008c0b8d7fcc5ef460f66 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/layers/utils/_color_encoding.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | [
"BSD-3-Clause"
] | 120 | 2018-09-04T22:05:13.000Z | 2019-03-02T01:13:57.000Z | napari/layers/utils/_color_encoding.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | [
"BSD-3-Clause"
] | 8 | 2018-09-04T21:48:26.000Z | 2019-01-29T04:48:30.000Z | from typing import Any, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, parse_obj_as, validator
from typing_extensions import Protocol, runtime_checkable
from ...utils import Colormap
from ...utils.colormaps import ValidColormapArg, ensure_colormap
from ...utils.colormaps.categorical_colormap import CategoricalColormap
from ...utils.colormaps.standardize_color import transform_color
from ...utils.translations import trans
from .color_transformations import ColorType
from .style_encoding import (
StyleEncoding,
_ConstantStyleEncoding,
_DerivedStyleEncoding,
_ManualStyleEncoding,
)
class ColorValue(np.ndarray):
    """A 4x1 array that represents one RGBA color value."""
    @classmethod
    def __get_validators__(cls):
        # pydantic custom-type hook: yields the coercion callable below.
        yield cls.validate_type
    @classmethod
    def validate_type(cls, val):
        # transform_color returns an Nx4 array; keep only the first row.
        return transform_color(val)[0]
class ColorArray(np.ndarray):
    """An Nx4 array where each row of N represents one RGBA color value."""

    @classmethod
    def __get_validators__(cls):
        # pydantic custom-type hook: yields the coercion callable below.
        yield cls.validate_type

    @classmethod
    def validate_type(cls, val):
        # An empty input short-circuits to an empty float32 Nx4 array;
        # anything else is normalized through transform_color.
        if len(val) == 0:
            return np.empty((0, 4), np.float32)
        return transform_color(val)
@runtime_checkable
class ColorEncoding(StyleEncoding[ColorValue, ColorArray], Protocol):
    """Protocol for every encoding that derives colors from features."""

    @classmethod
    def __get_validators__(cls):
        # pydantic validation hook.
        yield cls.validate

    @classmethod
    def validate(
        cls, value: Union['ColorEncoding', dict, str, ColorType]
    ) -> 'ColorEncoding':
        """Coerce ``value`` into a concrete ColorEncoding.

        Accepted inputs, in order of precedence:
        an existing ColorEncoding (returned unchanged); a dict describing one
        of the built-in encodings; a string (treated as a feature name, giving
        a DirectColorEncoding); a single color (ConstantColorEncoding); or a
        sequence of colors (ManualColorEncoding).

        Raises
        ------
        TypeError
            If the value is none of the supported input types.
        """
        if isinstance(value, ColorEncoding):
            return value
        if isinstance(value, dict):
            return parse_obj_as(
                Union[
                    ConstantColorEncoding,
                    ManualColorEncoding,
                    DirectColorEncoding,
                    NominalColorEncoding,
                    QuantitativeColorEncoding,
                ],
                value,
            )
        if isinstance(value, str):
            return DirectColorEncoding(feature=value, fallback=DEFAULT_COLOR)
        try:
            colors = ColorArray.validate_type(value)
        except (ValueError, AttributeError, KeyError):
            raise TypeError(
                trans._(
                    'value should be a ColorEncoding, a dict, a string, a color, a sequence of colors, or None',
                    deferred=True,
                )
            )
        # One color -> constant encoding; several -> manual encoding.
        if colors.shape[0] == 1:
            return ConstantColorEncoding(constant=value)
        return ManualColorEncoding(array=colors, default=DEFAULT_COLOR)
"""The default color to use, which may also be used a safe fallback color."""
DEFAULT_COLOR = ColorValue.validate_type('cyan')
class ConstantColorEncoding(_ConstantStyleEncoding[ColorValue, ColorArray]):
    """Encodes every element with one fixed color.

    Attributes
    ----------
    constant : ColorValue
        The single RGBA color applied to all elements.
    """

    encoding_type: Literal['ConstantColorEncoding'] = 'ConstantColorEncoding'
    constant: ColorValue
class ManualColorEncoding(_ManualStyleEncoding[ColorValue, ColorArray]):
    """Stores per-element colors explicitly in an array.

    Attributes
    ----------
    array : ColorArray
        Per-element RGBA values; may be written to directly for persistent
        updates.
    default : ColorValue
        Color used for elements beyond the end of ``array``.
    """

    encoding_type: Literal['ManualColorEncoding'] = 'ManualColorEncoding'
    array: ColorArray
    default: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)
class DirectColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
    """Reads colors directly out of a feature column.

    Attributes
    ----------
    feature : str
        Name of the feature column holding the color values.
    fallback : ColorValue
        Safe constant color used when the column does not contain valid
        color values.
    """

    encoding_type: Literal['DirectColorEncoding'] = 'DirectColorEncoding'
    feature: str
    fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)

    def __call__(self, features: Any) -> ColorArray:
        # A column-like may be a series or have an object dtype (e.g. color
        # names), neither of which transform_color handles, so convert to a
        # plain list first.
        column = list(features[self.feature])
        return ColorArray.validate_type(column)
class NominalColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
    """Maps a nominal (categorical) feature onto colors.

    Attributes
    ----------
    feature : str
        Name of the feature column whose categories are mapped to colors.
    colormap : CategoricalColormap
        Category-to-color mapping.
    fallback : ColorValue
        Safe constant color used when mapping fails.
    """

    encoding_type: Literal['NominalColorEncoding'] = 'NominalColorEncoding'
    feature: str
    colormap: CategoricalColormap
    fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)

    def __call__(self, features: Any) -> ColorArray:
        # map is not expecting some column-likes (e.g. pandas.Series), so
        # ensure this is a numpy array first.
        categories = np.asarray(features[self.feature])
        return self.colormap.map(categories)
class QuantitativeColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
    """Maps a quantitative feature through a continuous colormap.

    Attributes
    ----------
    feature : str
        Name of the feature column whose values are colormapped.
    colormap : Colormap
        Continuous colormap applied to the rescaled feature values.
    contrast_limits : Optional[Tuple[float, float]]
        The (min, max) feature values mapped to the first and last colormap
        colors. When None, limits are recomputed from the feature values on
        every call; if that fails they are effectively (0, 1).
    fallback : ColorValue
        Safe constant color used when the mapping fails.
    """

    encoding_type: Literal[
        'QuantitativeColorEncoding'
    ] = 'QuantitativeColorEncoding'
    feature: str
    colormap: Colormap
    contrast_limits: Optional[Tuple[float, float]] = None
    fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)

    def __call__(self, features: Any) -> ColorArray:
        values = features[self.feature]
        limits = self.contrast_limits or _calculate_contrast_limits(values)
        if limits is not None:
            # Rescale into [0, 1] before colormapping.
            values = np.interp(values, limits, (0, 1))
        return self.colormap.map(values)

    @validator('colormap', pre=True, always=True)
    def _check_colormap(cls, colormap: ValidColormapArg) -> Colormap:
        # Accept any colormap-like argument and normalize it.
        return ensure_colormap(colormap)

    @validator('contrast_limits', pre=True, always=True)
    def _check_contrast_limits(
        cls, contrast_limits
    ) -> Optional[Tuple[float, float]]:
        if contrast_limits is not None and (
            contrast_limits[0] >= contrast_limits[1]
        ):
            raise ValueError(
                'contrast_limits must be a strictly increasing pair of values'
            )
        return contrast_limits
def _calculate_contrast_limits(
values: np.ndarray,
) -> Optional[Tuple[float, float]]:
contrast_limits = None
if values.size > 0:
min_value = np.min(values)
max_value = np.max(values)
# Use < instead of != to handle nans.
if min_value < max_value:
contrast_limits = (min_value, max_value)
return contrast_limits
| 33.965251 | 112 | 0.663522 |
79583eca26a7012105d7d5eb2bf0f10661682504 | 58,938 | py | Python | app/modules/blog_posts/migrations/0023_auto_20200728_0910.py | nickmoreton/nhsx-website | 2397d1308376c02b75323d30e6bc916af0daac9d | [
"MIT"
] | 50 | 2019-04-04T17:50:00.000Z | 2021-08-05T15:08:37.000Z | app/modules/blog_posts/migrations/0023_auto_20200728_0910.py | nickmoreton/nhsx-website | 2397d1308376c02b75323d30e6bc916af0daac9d | [
"MIT"
] | 434 | 2019-04-04T18:25:32.000Z | 2022-03-31T18:23:37.000Z | app/modules/blog_posts/migrations/0023_auto_20200728_0910.py | nhsx-mirror/nhsx-website | 2133b4e275ca35ff77f7d6874e809f139ec4bf86 | [
"MIT"
] | 23 | 2019-04-04T09:52:07.000Z | 2021-04-11T07:41:47.000Z | # Generated by Django 3.0.4 on 2020-07-28 09:10
from django.db import migrations
import modules.core.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtailnhsukfrontend.blocks
class Migration(migrations.Migration):
dependencies = [
("blog_posts", "0022_auto_20200724_1308"),
]
operations = [
migrations.AlterField(
model_name="blogpost",
name="body",
field=wagtail.core.fields.StreamField(
[
("rich_text", wagtail.core.blocks.RichTextBlock(group=" Content")),
(
"block_quote",
wagtail.core.blocks.BlockQuoteBlock(group=" Content"),
),
("embed", modules.core.blocks.EmbedBlock(group=" Content")),
(
"captioned_embed",
wagtail.core.blocks.StructBlock(
[
("embed", modules.core.blocks.EmbedBlock()),
(
"title",
wagtail.core.blocks.CharBlock(required=False),
),
(
"sub_title",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" Content",
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" NHS Components",
),
),
(
"panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(required=False),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"promo",
wagtail.core.blocks.StructBlock(
[
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
label="Page", required=False
),
),
(
"url",
wagtail.core.blocks.URLBlock(
label="URL", required=False
),
),
(
"heading",
wagtail.core.blocks.CharBlock(required=True),
),
(
"description",
wagtail.core.blocks.CharBlock(required=False),
),
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
label="Image", required=False
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(required=False),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
],
group=" NHS Components",
),
),
(
"expander",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock(required=True)),
(
"body",
wagtail.core.blocks.StreamBlock(
[
(
"richtext",
wagtail.core.blocks.RichTextBlock(),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text",
required=True,
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL",
required=True,
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window",
required=False,
),
),
]
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading",
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important",
required=True,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"summary_list",
wagtail.core.blocks.StructBlock(
[
(
"rows",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.SummaryListRowBlock
),
),
(
"no_border",
wagtail.core.blocks.BooleanBlock(
default=False,
required=False,
),
),
]
),
),
("table", modules.core.blocks.TableBlock()),
],
required=True,
),
),
],
group=" NHS Components",
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading", required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
)
],
group=" NHS Components",
),
),
(
"panel_list",
wagtail.core.blocks.StructBlock(
[
(
"panels",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"left_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"right_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
]
)
),
)
],
group=" NHS Components",
),
),
(
"promo_group",
wagtail.core.blocks.StructBlock(
[
(
"column",
wagtail.core.blocks.ChoiceBlock(
choices=[
("one-half", "One-half"),
("one-third", "One-third"),
]
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"promos",
wagtail.core.blocks.ListBlock(
modules.core.blocks.BasePromoBlock
),
),
],
group=" NHS Components",
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important", required=True
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
("table", modules.core.blocks.TableBlock(group=" NHS Components")),
(
"panel_table",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock()),
("table", modules.core.blocks.TableBlock()),
],
group=" NHS Components",
),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text", required=True
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL", required=True
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window", required=False
),
),
],
group=" NHS Components",
),
),
],
blank=True,
verbose_name="Body blocks",
),
),
migrations.AlterField(
model_name="blogpostindexpage",
name="body",
field=wagtail.core.fields.StreamField(
[
("rich_text", wagtail.core.blocks.RichTextBlock(group=" Content")),
(
"block_quote",
wagtail.core.blocks.BlockQuoteBlock(group=" Content"),
),
("embed", modules.core.blocks.EmbedBlock(group=" Content")),
(
"captioned_embed",
wagtail.core.blocks.StructBlock(
[
("embed", modules.core.blocks.EmbedBlock()),
(
"title",
wagtail.core.blocks.CharBlock(required=False),
),
(
"sub_title",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" Content",
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(required=False),
),
],
group=" NHS Components",
),
),
(
"panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(required=False),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"promo",
wagtail.core.blocks.StructBlock(
[
(
"link_page",
wagtail.core.blocks.PageChooserBlock(
label="Page", required=False
),
),
(
"url",
wagtail.core.blocks.URLBlock(
label="URL", required=False
),
),
(
"heading",
wagtail.core.blocks.CharBlock(required=True),
),
(
"description",
wagtail.core.blocks.CharBlock(required=False),
),
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
label="Image", required=False
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(required=False),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
],
group=" NHS Components",
),
),
(
"expander",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock(required=True)),
(
"body",
wagtail.core.blocks.StreamBlock(
[
(
"richtext",
wagtail.core.blocks.RichTextBlock(),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text",
required=True,
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL",
required=True,
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window",
required=False,
),
),
]
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
)
]
),
),
(
"image",
wagtail.core.blocks.StructBlock(
[
(
"content_image",
wagtail.images.blocks.ImageChooserBlock(
required=True
),
),
(
"alt_text",
wagtail.core.blocks.CharBlock(
help_text="Only leave this blank if the image is decorative.",
required=False,
),
),
(
"caption",
wagtail.core.blocks.CharBlock(
required=False
),
),
]
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading",
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important",
required=True,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"summary_list",
wagtail.core.blocks.StructBlock(
[
(
"rows",
wagtail.core.blocks.ListBlock(
wagtailnhsukfrontend.blocks.SummaryListRowBlock
),
),
(
"no_border",
wagtail.core.blocks.BooleanBlock(
default=False,
required=False,
),
),
]
),
),
("table", modules.core.blocks.TableBlock()),
],
required=True,
),
),
],
group=" NHS Components",
),
),
(
"grey_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
label="heading", required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
(
"inset_text",
wagtail.core.blocks.StructBlock(
[
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
)
],
group=" NHS Components",
),
),
(
"panel_list",
wagtail.core.blocks.StructBlock(
[
(
"panels",
wagtail.core.blocks.ListBlock(
wagtail.core.blocks.StructBlock(
[
(
"left_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
(
"right_panel",
wagtail.core.blocks.StructBlock(
[
(
"label",
wagtail.core.blocks.CharBlock(
required=False
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(
required=True
),
),
]
),
),
]
)
),
)
],
group=" NHS Components",
),
),
(
"promo_group",
wagtail.core.blocks.StructBlock(
[
(
"column",
wagtail.core.blocks.ChoiceBlock(
choices=[
("one-half", "One-half"),
("one-third", "One-third"),
]
),
),
(
"size",
wagtail.core.blocks.ChoiceBlock(
choices=[("", "Default"), ("small", "Small")],
required=False,
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
),
),
(
"promos",
wagtail.core.blocks.ListBlock(
modules.core.blocks.BasePromoBlock
),
),
],
group=" NHS Components",
),
),
(
"warning_callout",
wagtail.core.blocks.StructBlock(
[
(
"title",
wagtail.core.blocks.CharBlock(
default="Important", required=True
),
),
(
"heading_level",
wagtail.core.blocks.IntegerBlock(
default=3,
help_text="The heading level affects users with screen readers. Default=3, Min=2, Max=4.",
max_value=4,
min_value=2,
required=True,
),
),
(
"body",
wagtail.core.blocks.RichTextBlock(required=True),
),
],
group=" NHS Components",
),
),
("table", modules.core.blocks.TableBlock(group=" NHS Components")),
(
"panel_table",
wagtail.core.blocks.StructBlock(
[
("title", wagtail.core.blocks.CharBlock()),
("table", modules.core.blocks.TableBlock()),
],
group=" NHS Components",
),
),
(
"action_link",
wagtail.core.blocks.StructBlock(
[
(
"text",
wagtail.core.blocks.CharBlock(
label="Link text", required=True
),
),
(
"external_url",
wagtail.core.blocks.URLBlock(
label="URL", required=True
),
),
(
"new_window",
wagtail.core.blocks.BooleanBlock(
label="Open in new window", required=False
),
),
],
group=" NHS Components",
),
),
],
blank=True,
verbose_name="Body blocks",
),
),
]
| 55.549482 | 192 | 0.1874 |
79583f344f0c1f642586c4a8ecc08f2aa4e24008 | 8,189 | py | Python | python/paddle/nn/__init__.py | Huangheyl/Paddle | a1b640bc66a5cc9583de503e7406aeba67565e8d | [
"Apache-2.0"
] | 8 | 2019-06-16T12:36:11.000Z | 2021-03-05T05:33:21.000Z | python/paddle/nn/__init__.py | Huangheyl/Paddle | a1b640bc66a5cc9583de503e7406aeba67565e8d | [
"Apache-2.0"
] | 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/nn/__init__.py | Huangheyl/Paddle | a1b640bc66a5cc9583de503e7406aeba67565e8d | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import all neural network related api under this directory,
# including layers, linear, conv, rnn etc.
from .layer import norm
from .functional import extension
from .layer import common
from .layer import rnn
from .utils import weight_norm_hook
from . import initializer
__all__ = []
__all__ += norm.__all__
__all__ += extension.__all__
__all__ += common.__all__
__all__ += rnn.__all__
__all__ += weight_norm_hook.__all__
# TODO: define alias in nn directory
# from .clip import ErrorClipByValue #DEFINE_ALIAS
from .clip import GradientClipByGlobalNorm #DEFINE_ALIAS
from .clip import GradientClipByNorm #DEFINE_ALIAS
from .clip import GradientClipByValue #DEFINE_ALIAS
# from .clip import set_gradient_clip #DEFINE_ALIAS
from .clip import clip #DEFINE_ALIAS
from .clip import clip_by_norm #DEFINE_ALIAS
from .control_flow import case #DEFINE_ALIAS
from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS
from .control_flow import switch_case #DEFINE_ALIAS
from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS
# from .decode import BeamSearchDecoder #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS
from .decode import beam_search #DEFINE_ALIAS
from .decode import beam_search_decode #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS
from .decode import gather_tree #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
from .layer.activation import ELU
from .layer.activation import GELU
from .layer.activation import Tanh
from .layer.activation import Hardshrink
from .layer.activation import Hardtanh
from .layer.activation import PReLU
from .layer.activation import ReLU
from .layer.activation import ReLU6 #DEFINE_ALIAS
from .layer.activation import SELU #DEFINE_ALIAS
from .layer.activation import LeakyReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
from .layer.activation import LogSigmoid
from .layer.activation import Softmax #DEFINE_ALIAS
from .layer.activation import Softplus #DEFINE_ALIAS
from .layer.activation import Softshrink #DEFINE_ALIAS
from .layer.activation import Softsign #DEFINE_ALIAS
from .layer.activation import Tanhshrink #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.activation import HSigmoid #DEFINE_ALIAS
from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
from .layer.common import Pool2D #DEFINE_ALIAS
from .layer.common import Pad2D #DEFINE_ALIAS
from .layer.common import ReflectionPad1d #DEFINE_ALIAS
from .layer.common import ReplicationPad1d #DEFINE_ALIAS
from .layer.common import ConstantPad1d #DEFINE_ALIAS
from .layer.common import ReflectionPad2d #DEFINE_ALIAS
from .layer.common import ReplicationPad2d #DEFINE_ALIAS
from .layer.common import ConstantPad2d #DEFINE_ALIAS
from .layer.common import ZeroPad2d #DEFINE_ALIAS
from .layer.common import ReplicationPad3d #DEFINE_ALIAS
from .layer.common import ConstantPad3d #DEFINE_ALIAS
from .layer.common import CosineSimilarity #DEFINE_ALIAS
from .layer.common import Embedding #DEFINE_ALIAS
from .layer.common import Linear #DEFINE_ALIAS
from .layer.common import Flatten #DEFINE_ALIAS
from .layer.common import Upsample #DEFINE_ALIAS
from .layer.common import UpsamplingNearest2d #DEFINE_ALIAS
from .layer.common import UpsamplingBilinear2d #DEFINE_ALIAS
from .layer.common import Bilinear #DEFINE_ALIAS
from .layer.common import Dropout #DEFINE_ALIAS
from .layer.common import Dropout2d #DEFINE_ALIAS
from .layer.common import Dropout3d #DEFINE_ALIAS
from .layer.common import AlphaDropout #DEFINE_ALIAS
from .layer.pooling import AvgPool1d #DEFINE_ALIAS
from .layer.pooling import AvgPool2d #DEFINE_ALIAS
from .layer.pooling import AvgPool3d #DEFINE_ALIAS
from .layer.pooling import MaxPool1d #DEFINE_ALIAS
from .layer.pooling import MaxPool2d #DEFINE_ALIAS
from .layer.pooling import MaxPool3d #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool1d #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool1d #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool2d #DEFINE_ALIAS
from .layer.pooling import AdaptiveMaxPool3d #DEFINE_ALIAS
from .layer.conv import Conv1d #DEFINE_ALIAS
from .layer.conv import Conv2d #DEFINE_ALIAS
from .layer.conv import Conv3d #DEFINE_ALIAS
from .layer.conv import ConvTranspose1d #DEFINE_ALIAS
from .layer.conv import ConvTranspose2d #DEFINE_ALIAS
from .layer.conv import ConvTranspose3d #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.extension import RowConv #DEFINE_ALIAS
# from .layer.learning_rate import CosineDecay #DEFINE_ALIAS
# from .layer.learning_rate import ExponentialDecay #DEFINE_ALIAS
# from .layer.learning_rate import InverseTimeDecay #DEFINE_ALIAS
# from .layer.learning_rate import NaturalExpDecay #DEFINE_ALIAS
# from .layer.learning_rate import NoamDecay #DEFINE_ALIAS
# from .layer.learning_rate import PiecewiseDecay #DEFINE_ALIAS
# from .layer.learning_rate import PolynomialDecay #DEFINE_ALIAS
from .layer.common import Linear
# from .layer.loss import NCELoss #DEFINE_ALIAS
from .layer.loss import BCEWithLogitsLoss #DEFINE_ALIAS
from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS
from .layer.loss import MSELoss #DEFINE_ALIAS
from .layer.loss import L1Loss #DEFINE_ALIAS
from .layer.loss import NLLLoss #DEFINE_ALIAS
from .layer.loss import BCELoss #DEFINE_ALIAS
from .layer.loss import KLDivLoss #DEFINE_ALIAS
from .layer.loss import MarginRankingLoss #DEFINE_ALIAS
from .layer.loss import CTCLoss #DEFINE_ALIAS
from .layer.loss import SmoothL1Loss #DEFINE_ALIAS
from .layer.norm import BatchNorm #DEFINE_ALIAS
from .layer.norm import SyncBatchNorm #DEFINE_ALIAS
from .layer.norm import GroupNorm #DEFINE_ALIAS
from .layer.norm import LayerNorm #DEFINE_ALIAS
from .layer.norm import SpectralNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm1d #DEFINE_ALIAS
from .layer.norm import InstanceNorm2d #DEFINE_ALIAS
from .layer.norm import InstanceNorm3d #DEFINE_ALIAS
from .layer.norm import BatchNorm1d #DEFINE_ALIAS
from .layer.norm import BatchNorm2d #DEFINE_ALIAS
from .layer.norm import BatchNorm3d #DEFINE_ALIAS
from .layer.rnn import *
# from .layer.rnn import RNNCell #DEFINE_ALIAS
# from .layer.rnn import GRUCell #DEFINE_ALIAS
# from .layer.rnn import LSTMCell #DEFINE_ALIAS
from .layer.transformer import MultiHeadAttention
from .layer.transformer import TransformerEncoderLayer
from .layer.transformer import TransformerEncoder
from .layer.transformer import TransformerDecoderLayer
from .layer.transformer import TransformerDecoder
from .layer.transformer import Transformer
from .layer.distance import PairwiseDistance #DEFINE_ALIAS
from .layer.vision import PixelShuffle
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from .layer import vision #DEFINE_ALIAS
from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential #DEFINE_ALIAS
| 47.33526 | 89 | 0.805227 |
79583f47a242721357ae5c7b21cb7a25e9220446 | 1,625 | py | Python | core/game/player.py | 8area8/MacGyver-The-game- | 2ada7ac2a2b02eb697600200bbbd5bf82b2cd073 | [
"Apache-2.0"
] | null | null | null | core/game/player.py | 8area8/MacGyver-The-game- | 2ada7ac2a2b02eb697600200bbbd5bf82b2cd073 | [
"Apache-2.0"
] | null | null | null | core/game/player.py | 8area8/MacGyver-The-game- | 2ada7ac2a2b02eb697600200bbbd5bf82b2cd073 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# coding: utf-8
"""Player module."""
from pygame.sprite import Group
from core.modules.constants import CASE_PIXELS, SPEED, DIRECTION, UPSCALE
class Player:
"""Player's class."""
def __init__(self, image, t_coords):
"""Initialize the player."""
self.items = Items()
self.image = image
self.t_coords = t_coords
self.in_moove = False
self.pace = None
self.destination = None
@property
def r_coords(self):
"""Return the true coordinates."""
x, y = self.t_coords
return x // CASE_PIXELS // UPSCALE, y // CASE_PIXELS // UPSCALE
def start_moove(self, x, y, direction):
"""Start a new player moove."""
self.in_moove = True
self.destination = x * CASE_PIXELS * UPSCALE, y * CASE_PIXELS * UPSCALE
dx, dy = DIRECTION[direction]
self.pace = dx * SPEED, dy * SPEED
def moove(self):
"""Moove the player."""
x, y = self.t_coords
px, py = self.pace
self.t_coords = x + px, y + py
if self.t_coords == self.destination:
self.in_moove = False
class Items(Group):
"""Little class for the items sprites."""
def __init__(self):
"""Initialize the class."""
super().__init__()
def add(self, sprite=None):
"""Add a new item.
Remove another.
"""
if not sprite:
return
if len(self) == 3:
removing = [i for i in self if i.coords == sprite.coords][0]
self.remove(removing)
super().add(sprite)
| 25 | 79 | 0.56 |
79583fd58863286e21be1ebfa699f1b821ef0629 | 2,041 | py | Python | tests/data/program_analysis/arrays/arrays-basic-06_lambdas.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 25 | 2018-03-03T11:57:57.000Z | 2022-01-16T21:19:54.000Z | tests/data/program_analysis/arrays/arrays-basic-06_lambdas.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 385 | 2018-02-21T16:52:06.000Z | 2022-02-17T07:44:56.000Z | tests/data/program_analysis/arrays/arrays-basic-06_lambdas.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 19 | 2018-03-20T01:08:11.000Z | 2021-09-29T01:04:49.000Z | from numbers import Real
from random import random
from delphi.translators.for2py.strings import *
import numpy as np
import delphi.translators.for2py.math_ext as math
def arrays_basic_06__main__assign__a__0():
a = [[0] * (1 - -(3)), [0] * (5 - 0), [0] * (14 - 10)]
return a
def arrays_basic_06__main__loop_0__assign__i__0():
return 3
def arrays_basic_06__main__loop_0__condition__IF_0__0(i):
return 0 <= i < 2
def arrays_basic_06__main__loop_0__decision__EXIT__0(IF_0_0):
return IF_0_0
def arrays_basic_06__main__loop_0__loop_1__assign__j__0():
return 1
def arrays_basic_06__main__loop_0__loop_1__condition__IF_0__0(j):
return 0 <= j < 6
def arrays_basic_06__main__loop_0__loop_1__decision__EXIT__0(IF_0_0):
return IF_0_0
def arrays_basic_06__main__loop_0__loop_1__loop_2__assign__k__0():
return 10
def arrays_basic_06__main__loop_0__loop_1__loop_2__condition__IF_0__0(k):
return 0 <= k < 15
def arrays_basic_06__main__loop_0__loop_1__loop_2__decision__EXIT__0(IF_0_0):
return IF_0_0
def arrays_basic_06__main__loop_0__loop_1__loop_2__assign__a_ijk__0(a, i: int, j: int, k: int):
a[i][j][k] = ((i+j)+k)
return a[i][j][k]
def arrays_basic_06__main__loop_0__loop_1__loop_2__assign_k__1(k):
return k + 1
def arrays_basic_06__main__loop_0__loop_1__assign_j__1(j):
return j + 1
def arrays_basic_06__main__loop_0__assign_i__1(i):
return i + 1
def arrays_basic_06__main__loop_3__assign__i__0():
return 3
def arrays_basic_06__main__loop_3__condition__IF_0__0(i):
return 0 <= i < 2
def arrays_basic_06__main__loop_3__decision__EXIT__0(IF_0_0):
return IF_0_0
def arrays_basic_06__main__loop_3__loop_4__assign__j__0():
return 1
def arrays_basic_06__main__loop_3__loop_4__condition__IF_0__0(j):
return 0 <= j < 6
def arrays_basic_06__main__loop_3__loop_4__decision__EXIT__0(IF_0_0):
return IF_0_0
def arrays_basic_06__main__loop_3__loop_4__assign_j__1(j):
return j + 1
def arrays_basic_06__main__loop_3__assign_i__1(i):
return i + 1
| 27.213333 | 95 | 0.790789 |
79583fdb9759e3e55a66d2ea6e9a69fe1b0900fd | 1,445 | py | Python | services/sidecar/src/simcore_service_sidecar/celery_log_setup.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | null | null | null | services/sidecar/src/simcore_service_sidecar/celery_log_setup.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | 17 | 2020-10-15T16:06:05.000Z | 2022-03-21T18:48:21.000Z | services/sidecar/src/simcore_service_sidecar/celery_log_setup.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | null | null | null | """ setup logging formatters to fit logspout's multiline pattern "^(ERROR|WARNING|INFO|DEBUG|CRITICAL)[:]"
NOTE: import to connect signals!
SEE https://github.com/ITISFoundation/osparc-ops/blob/master/services/graylog/docker-compose.yml#L113
"""
# NOTES:
# https://docs.celeryproject.org/en/latest/userguide/signals.html#setup-logging
# https://www.distributedpython.com/2018/08/28/celery-logging/
# https://www.distributedpython.com/2018/11/06/celery-task-logger-format/
from celery.app.log import TaskFormatter
from celery.signals import after_setup_logger, after_setup_task_logger
from celery.utils.log import get_task_logger
from servicelib.logging_utils import CustomFormatter, set_logging_handler
@after_setup_logger.connect
def setup_loggers(logger, *_args, **_kwargs):
""" Customizes global loggers """
set_logging_handler(logger)
class TaskColoredFormatter(TaskFormatter, CustomFormatter):
pass
@after_setup_task_logger.connect
def setup_task_logger(logger, *_args, **_kwargs):
""" Customizes task loggers """
set_logging_handler(
logger,
formatter_base=TaskColoredFormatter,
formatting="%(levelname)s: [%(asctime)s/%(processName)s][%(task_name)s(%(task_id)s)] [%(filename)s:%(lineno)d] %(message)s",
)
# TODO: configure via command line or config file. Add in config.yaml
log = get_task_logger(__name__)
log.info("Setting up loggers")
__all__ = ["get_task_logger"]
| 34.404762 | 132 | 0.756401 |
795840a68519fe6d866843feb603c4fdfec9d4f8 | 1,623 | py | Python | genz/funcs.py | larsoner/genz-1 | dc7a73b4597f976c0274d696c2610c79b7a1f7c1 | [
"MIT"
] | 2 | 2020-12-08T05:29:08.000Z | 2021-04-21T21:37:42.000Z | genz/funcs.py | larsoner/genz-1 | dc7a73b4597f976c0274d696c2610c79b7a1f7c1 | [
"MIT"
] | 10 | 2021-02-12T02:56:55.000Z | 2021-09-08T18:59:51.000Z | genz/funcs.py | larsoner/genz-1 | dc7a73b4597f976c0274d696c2610c79b7a1f7c1 | [
"MIT"
] | 2 | 2019-01-10T20:47:40.000Z | 2021-04-21T15:21:49.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path as op
import itertools
import mne
import numpy as np
from genz import defaults
def expand_grid(data_dict):
import pandas as pd
rows = itertools.product(*data_dict.values())
return pd.DataFrame.from_records(rows, columns=data_dict.keys())
def compute_adjacency_matrix(connectivity, threshold_prop=0.2):
"""Threshold and binarize connectivity matrix.
Notes:
https://github.com/mne-tools/mne-python/blob
/9585e93f8c2af699000bb47c2d6da1e9a6d79251/mne/connectivity/utils.py
#L69-L92
"""
if connectivity.ndim != 2 or \
connectivity.shape[0] != connectivity.shape[1]:
raise ValueError('connectivity must be have shape (n_nodes, n_nodes), '
'got %s' % (connectivity.shape,))
n_nodes = len(connectivity)
if np.allclose(connectivity, connectivity.T):
split = 2.
connectivity[np.tril_indices(n_nodes)] = 0
else:
split = 1.
threshold_prop = float(threshold_prop)
if not 0 < threshold_prop <= 1:
raise ValueError('threshold must be 0 <= threshold < 1, got %s'
% (threshold_prop,))
degree = connectivity.ravel() # no need to copy because np.array does
degree[::n_nodes + 1] = 0.
n_keep = int(round((degree.size - len(connectivity)) *
threshold_prop / split))
degree[np.argsort(degree)[:-n_keep]] = 0
degree.shape = connectivity.shape
if split == 2:
degree += degree.T # normally unsafe, but we know where our zeros are
return degree
| 32.46 | 79 | 0.640789 |
79584158cc406819d1db1e3d076dc499d144cf3b | 404 | py | Python | PycharmProjects/pythonexercicios/aula21/ex105.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | PycharmProjects/pythonexercicios/aula21/ex105.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | PycharmProjects/pythonexercicios/aula21/ex105.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | def notas(*n, sit=False):
r= dict()
r["total"] = len(n)
r["maior"] = max(n)
r["menor"] = min(n)
r["média"] = sum(n)/len(n)
if sit:
if r["média"] >= 7:
r["situação"] = "Boa"
elif r["média"] >= 5:
r["situação"] = "Razoável"
else:
r["situação"] = "Ruim"
return r
resp = notas(5.5, 9.5, 10, 6.5, sit=True)
print(resp)
| 21.263158 | 41 | 0.438119 |
7958432834ce4664c1c8bdd059cb2b1632aa2b1b | 817 | py | Python | build.py | team-counterpoint/tcmc-path | 7c8b2e9767a1cee8b519c9acc8c1d001af0daf86 | [
"BSD-2-Clause"
] | null | null | null | build.py | team-counterpoint/tcmc-path | 7c8b2e9767a1cee8b519c9acc8c1d001af0daf86 | [
"BSD-2-Clause"
] | null | null | null | build.py | team-counterpoint/tcmc-path | 7c8b2e9767a1cee8b519c9acc8c1d001af0daf86 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
"""Script to generate Alloy files from the template."""
import argparse
import jinja2
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--name")
parser.add_argument("--fc", action="store_true")
parser.add_argument("--path", action="store_true")
parser.add_argument("--subgraph", action="store_true")
args = parser.parse_args()
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath="./"),
trim_blocks=True,
lstrip_blocks=True,
)
template = env.get_template("template.als")
result = template.render(
name=args.name, fc=args.fc, path=args.path, subgraph=args.subgraph
)
result = re.sub(r"\n\n+", "\n\n", result)
print(result)
if __name__ == "__main__":
main()
| 24.757576 | 74 | 0.654835 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.