blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff5c6567f9fff32ac5c8b6f11625a07f9b3e5b72 | 0c44f2b1edcb50715eda693980a013970d777182 | /coursera_algo_hse/c1/w5/primitive_calculator.py | e457347c76755753409050bc8d5a80198430121d | [] | no_license | re9ulus/moocs_std | c1e67b99da3eb894a9ef6ee4f16ae4e39735b6e5 | 29b14164fec969c3801c72b96357ee109246d642 | refs/heads/master | 2021-06-14T08:36:42.658838 | 2020-12-05T17:08:19 | 2020-12-05T17:08:19 | 68,093,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # Uses python2
import sys
def optimal_sequence(n):
ar = [n+1 for i in range(n+1)]
ar[0], ar[1] = 1, 1
for i in range(1, n+1):
if i * 3 <= n:
ar[i * 3] = min(ar[i * 3], ar[i] + 1)
if i * 2 <= n:
ar[i * 2] = min(ar[i * 2], ar[i] + 1)
if i + 1 <= n:
ar[i + 1] = min(ar[i + 1], ar[i] + 1)
i = n
seq = []
while i >= 1:
seq.append(i)
min_val, next_i = ar[i - 1], i - 1
if i % 3 == 0 and ar[i / 3] < min_val:
min_val, next_i = ar[i / 3], i / 3
if i % 2 == 0 and ar[i / 2] < min_val:
min_val, next_i = ar[i / 2], i / 2
i = next_i
return list(reversed(seq))
input = sys.stdin.read()
n = int(input)
sequence = optimal_sequence(n)
print(len(sequence) - 1)
ans = ' '.join(map(str, sequence))
print ans
| [
"angeldaren@gmail.com"
] | angeldaren@gmail.com |
6e7eacfebaf750108ce6ad355cb15f758af4b9d8 | b61bdc56d6a04657fb65d382352dcd17e0858d94 | /tests/gdata_tests/apps/live_client_test.py | 1419ae697dd3d1a39d575b309093a08a92e8c808 | [
"Apache-2.0"
] | permissive | soravux/gdata | a1d9c3a1cf2a0dbb9c2e55621284acf500f02328 | f5a23bdbf3d7929f88b45a983c264218a8746636 | refs/heads/master | 2021-01-24T06:13:56.567522 | 2014-06-12T10:31:48 | 2014-06-12T10:31:48 | 20,752,151 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,821 | py | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Live client tests for the Provisioning API."""
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Shraddha Gupta <shraddhag@google.com>'
import random
import unittest
import gdata.apps.client
import gdata.apps.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
class AppsClientTest(unittest.TestCase):
def setUp(self):
self.client = gdata.apps.client.AppsClient(
domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.apps.client.AppsClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client, 'AppsClientTest',
self.client.auth_service, True)
def tearDown(self):
conf.close_client(self.client)
def testClientConfiguration(self):
self.assertEqual('apps-apis.google.com', self.client.host)
self.assertEqual('2.0', self.client.api_version)
self.assertEqual('apps', self.client.auth_service)
self.assertEqual(
('https://apps-apis.google.com/a/feeds/user/',
'https://apps-apis.google.com/a/feeds/policies/',
'https://apps-apis.google.com/a/feeds/alias/',
'https://apps-apis.google.com/a/feeds/groups/'),
self.client.auth_scopes)
if conf.options.get_value('runlive') == 'true':
self.assertEqual(self.client.domain,
conf.options.get_value('appsdomain'))
else:
self.assertEqual(self.client.domain, 'example.com')
def testMakeUserProvisioningUri(self):
self.assertEqual('/a/feeds/%s/user/2.0' % self.client.domain,
self.client._userURL())
def testMakeNicknameProvisioningUri(self):
self.assertEqual('/a/feeds/%s/nickname/2.0' % self.client.domain,
self.client._nicknameURL())
def testCreateRetrieveUpdateDelete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateUpdateDelete')
rnd_number = random.randrange(0, 100001)
username = 'test_user%s' % (rnd_number)
nickname = 'test_alias%s' % (rnd_number)
new_entry = self.client.CreateUser(
user_name=username, given_name='Elizabeth', family_name='Smith',
password='password', admin='true')
self.assertTrue(isinstance(new_entry,
gdata.apps.data.UserEntry))
self.assertEqual(new_entry.name.given_name, 'Elizabeth')
self.assertEqual(new_entry.name.family_name, 'Smith')
self.assertEqual(new_entry.login.user_name, username)
self.assertEqual(new_entry.login.admin, 'true')
fetched_entry = self.client.RetrieveUser(user_name=username)
self.assertEqual(fetched_entry.name.given_name, 'Elizabeth')
self.assertEqual(fetched_entry.name.family_name, 'Smith')
self.assertEqual(fetched_entry.login.user_name, username)
self.assertEqual(fetched_entry.login.admin, 'true')
new_entry.name.given_name = 'Joe'
new_entry.name.family_name = 'Brown'
updated_entry = self.client.UpdateUser(
user_name=username, user_entry=new_entry)
self.assertTrue(isinstance(updated_entry,
gdata.apps.data.UserEntry))
self.assertEqual(updated_entry.name.given_name, 'Joe')
self.assertEqual(updated_entry.name.family_name, 'Brown')
new_nickname = self.client.CreateNickname(user_name=username,
nickname=nickname)
self.assertTrue(isinstance(new_nickname,
gdata.apps.data.NicknameEntry))
self.assertEqual(new_nickname.login.user_name, username)
self.assertEqual(new_nickname.nickname.name, nickname)
fetched_alias = self.client.RetrieveNickname(nickname)
self.assertEqual(fetched_alias.login.user_name, username)
self.assertEqual(fetched_alias.nickname.name, nickname)
self.client.DeleteNickname(nickname)
self.client.DeleteUser(username)
def suite():
return conf.build_suite([AppsClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| [
"yannickhold@gmail.com"
] | yannickhold@gmail.com |
020300eceb6ba440cbc8b882a06277d6760cc7e6 | 2aa4c7c94866e7a958e4787dd4487aa7c1eb8d61 | /applications/trilinos_application/python_scripts/MonolithicMultiLevelSolver.py | a54bb40bcdf4a3a7939b0cf498168f33ff977ce6 | [
"BSD-3-Clause"
] | permissive | PFEM/Kratos | b48df91e6ef5a00edf125e6f5aa398505c9c2b96 | 796c8572e9fe3875562d77370fc60beeacca0eeb | refs/heads/master | 2021-10-16T04:33:47.591467 | 2019-02-04T14:22:06 | 2019-02-04T14:22:06 | 106,919,267 | 1 | 0 | null | 2017-10-14T10:34:43 | 2017-10-14T10:34:43 | null | UTF-8 | Python | false | false | 1,624 | py | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.TrilinosApplication import *
def LinearSolver(tolerance, max_iterations):
# settings for the iterative solver
aztec_parameters = ParameterList()
aztec_parameters.set("AZ_solver", "AZ_gmres")
# aztec_parameters.set("AZ_output","AZ_none");
aztec_parameters.set("AZ_output", 10)
# settings of the ML solver
MLList = ParameterList()
default_settings = EpetraDefaultSetter()
default_settings.SetDefaults(MLList, "NSSA")
MLList.set("ML output", 1)
MLList.set("coarse: max size", 10000)
MLList.set("max levels", 3)
# MLList.set("increasing or decreasing","increasing");
# MLList.set("null space: add default vectors", 1);
MLList.set("aggregation: type", "Uncoupled")
# MLList.set("smoother: sweeps",3);
# MLList.set("smoother: pre or post", "both")
# MLList.set("ML output",0);
MLList.set("coarse: type", "Amesos-Superludist")
# MLList.set("smoother: ifpack type", "ILU");
# MLList.set("smoother: ifpack overlap", 0);
# MLList.SetSublistIntValue("smoother: ifpack list","fact: level-of-fill", 5);
# MLList.set("coarse: sweeps", 3)
# MLList.set("coarse: pre or post", "both")
# MLList.set("print unused",1)
# MLList.setboolvalue("energy minimization: enable",0)
# MLList.set("aggregation: damping factor",0.0)
linear_solver = MultiLevelSolver(aztec_parameters, MLList, tolerance, max_iterations)
return linear_solver
| [
"rrossi@cimne.upc.edu"
] | rrossi@cimne.upc.edu |
513143cc032bfe7bf0b443158e43a0ef5e19b9c4 | 68f757e7be32235c73e316888ee65a41c48ecd4e | /python_book(이것이 코딩테스트다)/13 DFS/6 감시피하기.py | 2ae13a528d691db24f99e98aa727d9f3e6b9279b | [] | no_license | leejongcheal/algorithm_python | b346fcdbe9b1fdee33f689477f983a63cf1557dc | f5d9bc468cab8de07b9853c97c3db983e6965d8f | refs/heads/master | 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | def check(Map):
global N, T
steps = [(1,0),(-1,0),(0,1),(0,-1)]
for tx, ty in T:
for dx, dy in steps:
x, y = tx + dx, ty + dy
while 0 <= x < N and 0 <= y < N:
if Map[x][y] == "O":
break
if Map[x][y] == "S":
return 0
x, y = x + dx, y + dy
return 1
def dfs(x, y):
global L, N, flag, count
if flag == 1:
return
if count == 3:
if check(L):
flag = 1
return
for i in range(N):
for j in range(N):
if x < i or (j > y and x == i):
if L[i][j] == "X":
L[i][j] = "O"
count += 1
dfs(i, j)
L[i][j] = "X"
count -= 1
return
N = int(input())
L = [list(input().split()) for _ in range(N)]
T = []
flag = 0
count = 0
for i in range(N):
for j in range(N):
if L[i][j] == "T":
T.append((i,j))
dfs(-1, -1)
if flag == 1:
print("YES")
else:
print("NO") | [
"aksndk123@naver.com"
] | aksndk123@naver.com |
4a7d8dd81b736d9479bf721127c403932bdcb7da | e46c1e10b4baa74f47eca06bea626ed0c495b56e | /polls/urls.py | 8a290ec06988b435004629c750acf2760713b1d0 | [] | no_license | ywjung25/mysite | 5acc92896d28344d4303fc2f82389447efc0408f | 930d966e34ccab9db2d6451a582b78a0d84eadaa | refs/heads/main | 2023-01-28T12:33:42.914250 | 2020-11-29T13:08:44 | 2020-11-29T13:08:44 | 316,663,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from django.urls import path
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.index, name='index'),
path('<int:question_id>/', views.detail, name='detail'),
path('<int:question_id>/results/', views.results, name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"ywjung25@hotmail.com"
] | ywjung25@hotmail.com |
88f0513eb91e116ab084434dcbbf4f2ed2101ac8 | 7b5722dd76e959e816c732b87eafeb8b19e47947 | /shout_app/shouts/views.py | 0ea4086791dc8a287dcd56f8bc02c90d55cf662a | [] | no_license | shhra/shout-prod | f577abc3571a7406d9d8779c005e990fd5d223b3 | 62dc87b83664f910fd81002e31e037611135a872 | refs/heads/master | 2022-04-07T18:51:53.925700 | 2020-02-08T04:59:16 | 2020-02-08T04:59:16 | 208,790,431 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,468 | py | import scipy.spatial
import numpy as np
from django.shortcuts import redirect
from rest_framework import generics, mixins, status, viewsets
from rest_framework.exceptions import NotFound, NotAcceptable
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.reverse import reverse
from notifications.models import Notification
from notifications.signals import notify
from .models import Shout, Comment, Discussion
from .serializers import ShoutSerializer, CommentSerializer, NotificationSerializer
from shout_app.profile.serializers import ProfileSerializer
from shout_app.core.permissions import IsOwnerOrReadOnly
from datetime import datetime, timedelta
class ShoutViewSet(mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
lookup_field = 'slug'
queryset = Shout.objects.all().order_by('-created_at')
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = ShoutSerializer
def get_queryset(self):
queryset = self.queryset
supported_by = self.request.query_params.get('supported', None)
if supported_by is not None:
queryset = queryset.filter(supported_by__user__username=supported_by)
return queryset
def create(self, request):
serializer_context = {
'shouter': request.user.profile,
'request': request
}
serializer_data = request.data.get('data', {})
serializer = self.serializer_class(
data=serializer_data, context=serializer_context)
# Adding spam filter.
past_ten = datetime.now() - timedelta(minutes=10)
shouts = Shout.objects.all().filter(shouter=request.user.profile,
created_at__gte=past_ten)
if len(shouts) > 4:
raise NotAcceptable('You have been spamming a lot, and are banned for 10 min')
else:
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data , status=status.HTTP_201_CREATED)
def list(self, request):
serializer_context = {'request': request}
page = self.paginate_queryset(self.get_queryset())
serializer = self.serializer_class(page, context=serializer_context, many=True)
return self.get_paginated_response(serializer.data)
def retrieve(self, request, slug):
serializer_context = {'request': request}
try:
serializer_instance = self.queryset.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound("No one has shouted this.")
serializer = self.serializer_class(
serializer_instance,
context=serializer_context)
return Response(serializer.data, status=status.HTTP_200_OK)
def destroy(self, request, slug=None):
serializer_context = {'request': request}
try:
serializer_instance = self.queryset.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound("No one has shouted this.")
if request.user.profile == serializer_instance.shouter:
serializer_instance.delete()
else:
raise NotAcceptable("You are denied, you can't delete it.")
return Response(None, status=status.HTTP_204_NO_CONTENT)
class ShoutSupportAPIView(APIView):
permisson_classes = (IsAuthenticated,)
serializer_class = ShoutSerializer
def delete(self, request, slug=None):
profile = self.request.user.profile
serializer_context = {'request': request}
try:
shout = Shout.objects.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound('No one has shouted this')
recipient = shout.shouter.user
# notif_discussion = Notification.objects.all().filter(
# target_object_id=shout.id,
# description="discussion"
# )
# notif_discussion.delete()
notif = recipient.notifications.get(
actor_object_id=profile.user.id,
target_object_id=shout.id,
description="support")
profile.not_support(shout)
notif.delete()
serializer = self.serializer_class(shout, context=serializer_context)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, slug=None):
profile = self.request.user.profile
serializer_context = {'request': request}
try:
shout = Shout.objects.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound('No one has shouted this')
shouter = shout.shouter.user
profile.support(shout)
try:
notif = shouter.notifications.get(
actor_object_id=self.request.user.id,
target_object_id=shout.id,
description="support")
if notif:
pass
except:
notify.send(profile.user,
recipient=shouter,
target=shout,
description="support",
verb=(f"{profile.user} has supported the shout about {shout.title}."))
# recipients = [recipient.user for recipient in shout.supported_by.all()]
# try:
# notif_d = Notification.objects.all().fitler(
# target_object_id=shout.id,
# description="discussion")
# if notif_d:
# pass
# except:
# notify.send(profile.user,
# recipient=recipients,
# target=shout,
# description="discussion",
# verb=(f"Discussion is unlocked for the shout about {shout.title}."))
serializer = self.serializer_class(shout, context=serializer_context)
return Response(serializer.data, status=status.HTTP_201_CREATED)
class CommentListCreateAPIView(generics.ListCreateAPIView):
lookup_field = 'commented_on__slug'
lookup_url_kwarg = 'slug'
permission_classes = (IsOwnerOrReadOnly,)
queryset = Comment.objects.select_related(
'commented_on', 'commented_on__shouter', 'commented_on__shouter__user',
'commented_by', 'commented_by__user'
)
serializer_class = CommentSerializer
def filter_queryset(self, queryset):
filters = {self.lookup_field: self.kwargs[self.lookup_url_kwarg]}
return queryset.filter(**filters)
def create(self, request, slug=None):
data = request.data.get('data', {})
context = {'commented_by': request.user.profile}
try:
context['commented_on'] = shout = Shout.objects.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound("No one shouted this")
if request.user.profile.has_supported(shout):
serializer = self.serializer_class(data=data, context=context)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
raise NotAcceptable("You don't have the permission to comment here. \
Please support to comment.")
class CommentDestroyAPIView(generics.DestroyAPIView):
lookup_url_kwarg = 'comment_slug'
permission_classes = (IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,)
queryset = Comment.objects.all()
def destroy(self, request, shout_slug=None, comment_slug=None):
try:
comment = Comment.objects.get(slug=comment_slug)
except Comment.DoesNotExist:
raise NotFound('This comment doesn\'t exists!')
try:
shout = Shout.objects.get(slug=shout_slug)
except Shout.DoesNotExist:
raise NotFound("No one shouted this")
if (request.user.profile is comment.commented_by) or (shout.shouter is request.user.profile):
comment.delete()
else:
raise NotAcceptable("You are denied, you can't delete it.")
return Response(None, status=status.HTTP_204_NO_CONTENT)
# View for Echo
class EchoView(generics.GenericAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly)
serializer_class = ShoutSerializer
def get_object(self, slug):
try:
return Shout.objects.get(slug=slug)
except Shout.DoesNotExist:
raise NotFound("This shout doesn't exist")
def get(self, request, *args, **kwargs):
serializer_context = {'request': request}
shout = self.get_object(slug=kwargs['slug'])
past_one = datetime.now() - timedelta(minutes=1440*7)
query_embedding = np.zeros((420, 768))
query = np.array(shout.value)
query_embedding[:query.shape[0], :query.shape[1]] = query
query_embedding = query_embedding.reshape(1, -1)
corpus = Shout.objects.all().filter(created_at__gte=past_one).exclude(slug=shout.slug)
if len(corpus) == 0:
raise NotFound("No similar shouts in last week")
corpus_lists = list()
for each in corpus:
temp_array = np.array(each.value)
temp = np.zeros((420, 768))
temp[:temp_array.shape[0], :temp_array.shape[1]] = temp_array
corpus_lists.append(temp.reshape(-1))
corpus_embedding = np.array(corpus_lists)
distance = scipy.spatial.distance.cdist(
query_embedding,
corpus_embedding,
'cosine')[0]
context = {}
context['data'] = list()
results = zip(range(len(distance)), distance)
results = sorted(results, key=lambda x: x[1])
for i, _ in results[:5]:
context['data'].append(self.serializer_class(corpus[i]).data)
return Response(context, status=status.HTTP_200_OK)
# # Views related to self
class MeView(APIView):
permission_classes = (IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly)
serializer_class = ProfileSerializer
def get(self, request, format=None):
user = self.request.user.profile
context = {}
context['data'] = self.serializer_class(user).data
return Response(context, status=status.HTTP_200_OK)
class NotificationViewSet(viewsets.ViewSet):
serializer_class = NotificationSerializer
permission_classes = (IsOwnerOrReadOnly,)
def list(self, request):
user = self.request.user
queryset = user.notifications.unread()
return Response(NotificationSerializer(queryset, many=True).data)
class NotificationReadView(generics.GenericAPIView):
permission_classes = (IsOwnerOrReadOnly,)
def get(self, request, *args, **kwargs):
notif = Notification.objects.get(id=kwargs['id'])
notif.mark_as_read()
return Response("ok", status=status.HTTP_200_OK)
class NotificationAllReadView(generics.GenericAPIView):
permission_classes = (IsOwnerOrReadOnly,)
def get(self, request):
user = self.request.user
notifs = user.notifications.all()
notifs.mark_all_as_read()
data = {"read"}
return Response("read", status=status.HTTP_200_OK)
| [
"071bct538@pcampus.edu.np"
] | 071bct538@pcampus.edu.np |
87ebd31e1bebd80945306794db5c8a33e2a23f9e | 80ac27ffbecd581c5169f0f89794a514097b07e0 | /Python_Scripts/Mount_Unmount_USB.py | cb5be52bf744cbb35bb91fdad41cc0c6c9d5a391 | [] | no_license | sujaynsa/Project-LifeBox | 66c37bc240d44b681071bb04f89fb302c438b774 | bb11d33495a140895a10055b8d955311a00dbc4c | refs/heads/master | 2020-07-02T10:43:49.502182 | 2019-12-02T22:58:09 | 2019-12-02T22:58:09 | 201,502,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #Mount and Unmount USB devices in Raspberry Pi
import os
import shutil
"""
x = os.system("sudo mount /dev/sda1 /media/usb")
if x == 0 :
print("Device has been mounted")
else:
print("No Device found")
"""
#os.system("sudo umount /media/usb")
#print("Device unmounted")
| [
"noreply@github.com"
] | sujaynsa.noreply@github.com |
230a4c570aaab54e6c491ad0822a5847e7ca719a | a2fc18c48713326a692d7249e432827802c6268f | /tests/test_basic.py | 6ceb1d9fad950c7e65c6ad24781605200af2f53a | [] | no_license | dcHHH/byterun | 5ab809d11bd241d72c1c045bcfffaac05ac3dbc0 | cd6ed119e9759a6ada1b3d59568998c3c744a63f | refs/heads/master | 2020-04-26T23:25:15.618129 | 2019-03-26T10:34:56 | 2019-03-26T10:34:56 | 173,901,850 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,270 | py | """Basic tests for Byterun."""
from . import vmtest
class TestIt(vmtest.VmTestCase):
def test_constant(self):
self.assert_ok("17")
def test_dict(self):
self.assert_ok("""\
a = {'x': 1}
print(a['x'] == 1)""")
def test_f_string(self):
self.assert_ok("""\
foo = 'foo'
print(f'foo is {foo}')""")
def test_globals(self):
self.assert_ok("""\
global xyz
xyz=2106
def abc():
global xyz
xyz+=1
print("Midst:",xyz)
print("Pre:",xyz)
abc()
print("Post:",xyz)
""")
def test_for_loop(self):
self.assert_ok("""\
out = ""
for i in range(5):
out = out + str(i)
print(out)
""")
def test_inplace_operators(self):
self.assert_ok("""\
x, y = 2, 3
x **= y
assert x == 8 and y == 3
x *= y
assert x == 24 and y == 3
x //= y
assert x == 8 and y == 3
x %= y
assert x == 2 and y == 3
x += y
assert x == 5 and y == 3
x -= y
assert x == 2 and y == 3
x <<= y
assert x == 16 and y == 3
x >>= y
assert x == 2 and y == 3
x = 0x8F
x &= 0xA5
assert x == 0x85
x |= 0x10
assert x == 0x95
x ^= 0x33
assert x == 0xA6
""")
def test_inplace_division(self):
self.assert_ok("""\
x, y = 24, 3
x /= y
assert x == 8.0 and y == 3
assert isinstance(x, float)
x /= y
assert x == (8.0/3.0) and y == 3
assert isinstance(x, float)
""")
def test_slice(self):
self.assert_ok("""\
print("hello, world"[3:8])
""")
self.assert_ok("""\
print("hello, world"[:8])
""")
self.assert_ok("""\
print("hello, world"[3:])
""")
self.assert_ok("""\
print("hello, world"[:])
""")
self.assert_ok("""\
print("hello, world"[::-1])
""")
self.assert_ok("""\
print("hello, world"[3:8:2])
""")
def test_slice_assignment(self):
self.assert_ok("""\
l = list(range(10))
l[3:8] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[:8] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[3:] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[:] = ["x"]
print(l)
""")
def test_slice_deletion(self):
self.assert_ok("""\
l = list(range(10))
del l[3:8]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
del l[:8]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
del l[3:]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
del l[:]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
del l[::2]
print(l)
""")
def test_building_stuff(self):
self.assert_ok("""\
print((1+1, 2+2, 3+3))
""")
self.assert_ok("""\
print([1+1, 2+2, 3+3])
""")
self.assert_ok("""\
print({1:1+1, 2:2+2, 3:3+3})
""")
def test_subscripting(self):
self.assert_ok("""\
l = list(range(10))
print("%s %s %s" % (l[0], l[3], l[9]))
""")
self.assert_ok("""\
l = list(range(10))
l[5] = 17
print(l)
""")
self.assert_ok("""\
l = list(range(10))
del l[5]
print(l)
""")
def test_generator_expression(self):
self.assert_ok("""\
x = "-".join(str(z) for z in range(5))
assert x == "0-1-2-3-4"
""")
# From test_regr.py
# This failed a different way than the previous join when genexps were
# broken:
self.assert_ok("""\
from textwrap import fill
x = set(['test_str'])
width = 70
indent = 4
blanks = ' ' * indent
res = fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks)
print(res)
""")
def test_list_comprehension(self):
self.assert_ok("""\
x = [z*z for z in range(5)]
assert x == [0, 1, 4, 9, 16]
""")
def test_dict_comprehension(self):
self.assert_ok("""\
x = {z:z*z for z in range(5)}
assert x == {0:0, 1:1, 2:4, 3:9, 4:16}
""")
def test_set_comprehension(self):
self.assert_ok("""\
x = {z*z for z in range(5)}
assert x == {0, 1, 4, 9, 16}
""")
def test_strange_sequence_ops(self):
# from stdlib: test/test_augassign.py
self.assert_ok("""\
x = [1,2]
x += [3,4]
x *= 2
assert x == [1, 2, 3, 4, 1, 2, 3, 4]
x = [1, 2, 3]
y = x
x[1:2] *= 2
y[1:2] += [1]
assert x == [1, 2, 1, 2, 3]
assert x is y
""")
def test_unary_operators(self):
self.assert_ok("""\
x = 8
print(-x, ~x, not x)
""")
def test_attributes(self):
self.assert_ok("""\
l = lambda: 1 # Just to have an object...
l.foo = 17
print(hasattr(l, "foo"), l.foo)
del l.foo
print(hasattr(l, "foo"))
""")
def test_attribute_inplace_ops(self):
self.assert_ok("""\
l = lambda: 1 # Just to have an object...
l.foo = 17
l.foo -= 3
print(l.foo)
""")
def test_deleting_names(self):
self.assert_ok("""\
g = 17
assert g == 17
del g
g
""", raises=NameError)
def test_deleting_local_names(self):
self.assert_ok("""\
def f():
l = 23
assert l == 23
del l
l
f()
""", raises=NameError)
def test_import(self):
self.assert_ok("""\
import math
print(math.pi, math.e)
from math import sqrt
print(sqrt(2))
from math import *
print(sin(2))
""")
def test_callback(self):
self.assert_ok("""\
def lcase(s):
return s.lower()
l = ["xyz", "ABC"]
l.sort(key=lcase)
print(l)
assert l == ["ABC", "xyz"]
""")
def test_unpacking(self):
self.assert_ok("""\
a, b, c = (1, 2, 3)
assert a == 1
assert b == 2
assert c == 3
""")
def test_build_unpacking(self):
self.assert_ok("""\
a = (*[1, 2], *[3, 4])
print(a)
""")
self.assert_ok("""\
[*[1, 2], *[3, 4]]
""")
self.assert_ok("""\
{*[1, 2], *[3, 4]}
""")
self.assert_ok("""\
{**{1: 2}, **{3: 4}}
""")
def test_exec_statement(self):
self.assert_ok("""\
g = {}
exec("a = 11", g, g)
assert g['a'] == 11
""")
def test_jump_if_true_or_pop(self):
self.assert_ok("""\
def f(a, b):
return a or b
assert f(17, 0) == 17
assert f(0, 23) == 23
assert f(0, "") == ""
""")
def test_jump_if_false_or_pop(self):
self.assert_ok("""\
def f(a, b):
return not(a and b)
assert f(17, 0) is True
assert f(0, 23) is True
assert f(0, "") is True
assert f(17, 23) is False
""")
def test_pop_jump_if_true(self):
self.assert_ok("""\
def f(a):
if not a:
return 'foo'
else:
return 'bar'
assert f(0) == 'foo'
assert f(1) == 'bar'
""")
def test_decorator(self):
self.assert_ok("""\
def verbose(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs)
return _wrapper
@verbose
def add(x, y):
return x+y
add(7, 3)
""")
class TestLoops(vmtest.VmTestCase):
def test_for(self):
self.assert_ok("""\
for i in range(10):
print(i)
print("done")
""")
def test_break(self):
self.assert_ok("""\
for i in range(10):
print(i)
if i == 7:
break
print("done")
""")
def test_continue(self):
# fun fact: this doesn't use CONTINUE_LOOP
self.assert_ok("""\
for i in range(10):
if i % 3 == 0:
continue
print(i)
print("done")
""")
def test_continue_in_try_except(self):
self.assert_ok("""\
for i in range(10):
try:
if i % 3 == 0:
continue
print(i)
except ValueError:
pass
print("done")
""")
def test_continue_in_try_finally(self):
self.assert_ok("""\
for i in range(10):
try:
if i % 3 == 0:
continue
print(i)
finally:
print(".")
print("done")
""")
class TestComparisons(vmtest.VmTestCase):
def test_in(self):
self.assert_ok("""\
assert "x" in "xyz"
assert "x" not in "abc"
assert "x" in ("x", "y", "z")
assert "x" not in ("a", "b", "c")
""")
def test_less(self):
self.assert_ok("""\
assert 1 < 3
assert 1 <= 2 and 1 <= 1
assert "a" < "b"
assert "a" <= "b" and "a" <= "a"
""")
def test_greater(self):
self.assert_ok("""\
assert 3 > 1
assert 3 >= 1 and 3 >= 3
assert "z" > "a"
assert "z" >= "a" and "z" >= "z"
""")
| [
"744947370@QQ.COM"
] | 744947370@QQ.COM |
24e84c42808d312b25f3bd226fb296a11b79595f | 77c518b87e67e9926d130f856a7edb12302596eb | /IO/Parallel/Testing/Python/TestPDataSetReaderWriterWithFieldData.py | 76a8793d32ef60bca8e46e719d3fcad2ef9b589b | [
"BSD-3-Clause"
] | permissive | t3dbrida/VTK | 73e308baa1e779f208421a728a4a15fec5c4f591 | e944bac3ba12295278dcbfa5d1cd7e71d6457bef | refs/heads/master | 2023-08-31T21:01:58.375533 | 2019-09-23T06:43:00 | 2019-09-23T06:43:00 | 139,547,456 | 2 | 0 | NOASSERTION | 2019-11-22T14:46:48 | 2018-07-03T07:49:14 | C++ | UTF-8 | Python | false | false | 1,173 | py | # Tests paraview/paraview#18391
from vtkmodules.util.misc import vtkGetDataRoot, vtkGetTempDir
from vtk import vtkXMLGenericDataObjectReader, vtkDoubleArray, vtkDataSetWriter, vtkPDataSetReader
from os.path import join
reader = vtkXMLGenericDataObjectReader()
reader.SetFileName(join(vtkGetDataRoot(), "Data/multicomb_0.vts"))
reader.Update()
a1 = vtkDoubleArray()
a1.SetName("field-1")
a1.SetNumberOfTuples(1)
a1.SetValue(0, 100.0)
a1.GetRange()
a2 = vtkDoubleArray()
a2.SetName("field-2")
a2.SetNumberOfTuples(2)
a2.SetValue(0, 1.0)
a2.SetValue(1, 2.0)
a2.GetRange()
dobj = reader.GetOutputDataObject(0)
dobj.GetFieldData().AddArray(a1)
dobj.GetFieldData().AddArray(a2)
writer = vtkDataSetWriter()
writer.SetFileName(join(vtkGetTempDir(), "TestPDataSetReaderWriterWithFieldData.vtk"))
writer.SetInputDataObject(dobj)
writer.Write()
reader = vtkPDataSetReader()
reader.SetFileName(join(vtkGetTempDir(), "TestPDataSetReaderWriterWithFieldData.vtk"))
reader.Update()
dobj2 = reader.GetOutputDataObject(0)
assert (dobj.GetNumberOfPoints() == dobj2.GetNumberOfPoints() and
dobj.GetFieldData().GetNumberOfArrays() == dobj2.GetFieldData().GetNumberOfArrays())
| [
"utkarsh.ayachit@kitware.com"
] | utkarsh.ayachit@kitware.com |
4009361d3230b25bd783c8a62243914aa44d83e8 | dad6ba45f05d267f6c44bd27949868dc474476e6 | /CQT/Archive/withoutHead.py | b880268f61562e2aaff0d40889c0d47085412c3c | [] | no_license | morindaz/CQT_All | 1f36c5ef22348e2293d9f4f63e58009f0dd274b7 | 8ab6f82ad7b1cf3b4555fe785566013f5cb57a4f | refs/heads/master | 2021-04-27T00:11:17.744327 | 2018-03-04T07:19:06 | 2018-03-04T07:19:06 | 123,765,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # coding=utf-8
import time
import glob
import pandas as pd

# NOTE(review): per the original (Chinese) comment, the "C" and "R" answer
# directories are intentionally swapped here — confirm before changing.
answerR = "E:\\pingan\\dataset\\newFeature\\answer_C"  # 492 files, without header
answerC = "E:\\pingan\\dataset\\newFeature\\answer_R"  # 244 files
answerI = "E:\\pingan\\dataset\\newFeature\\answer_I"  # 478 files
base = answerI

# Collect every CSV under the chosen answer directory.
csvx_list = glob.glob(base + "\\" + '*.csv')
print('总共发现%s个CSV文件' % len(csvx_list))
time.sleep(2)
print('正在处理............')
# Read each file once, then concatenate in a single pass.  The original
# called DataFrame.append inside the loop, which is quadratic in the total
# row count and was deprecated and removed in pandas 2.0; pd.concat without
# ignore_index preserves each file's row index exactly like append did.
frames = [pd.read_csv(path, sep=',', header=0) for path in csvx_list]
df = pd.concat(frames) if frames else pd.DataFrame()
print('写入成功!')
df.to_csv("base" + '.csv')
print('写入完毕!')
print('3秒钟自动关闭程序!')
time.sleep(3) | [
"morindaz.mao@ucloud.cn"
] | morindaz.mao@ucloud.cn |
8845d190ae1c3cf32b9e4665e81304da16378cc6 | 56b7e5ed6941fc4b83148e00bd51421dc3ac993a | /hackerrank/Delete Nodes Greater Than X/Delete Nodes Greater Than X.py | 6c8ef8211533ee3666850ccdc29c91e09917498c | [] | no_license | samir-0711/Leetcode-Python | f960e15015a3f2fd88f723d7f9237945a7133553 | d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c | refs/heads/master | 2022-12-18T05:27:48.224001 | 2020-09-30T21:03:42 | 2020-09-30T21:03:42 | 300,061,318 | 0 | 0 | null | 2020-09-30T20:59:42 | 2020-09-30T20:59:42 | null | UTF-8 | Python | false | false | 1,590 | py | '''
Complete the removeNodes function provided in your editor. It has 2 parameters:
1. list: A reference to a Linked List Node that is the head of a linked list.
2. x: An integer value.
Your funciton should remove all nodes from the list having data values greater than x, and then return the head of the modified linked list.
Input Format
The locked stub code in your editer processes the following inputs and pased the necessary arguments to the removeNodes function:
The first line contains N, the number of nodes in the linked list.
Each line i (where 0<= i <) of the N subsequent lines contains an integer representing the value of a node in the linked list. The last line contains an integer, x.
Output Format
Return the linked list after removing the nodes containing values > x.
Sample Input 1:
5
1
2
3
4
5
3
Sample Output 1:
1
2
3
Sample Input 2:
5
5
2
1
6
7
5
Sample Output2:
5
2
1
'''
class LinkedListNode:
def __init__(self, node_Value):
self.val = node_Value
self.next = None
def _insert_node_into_singlylinkedlist(head, tail, val):
if head == None:
head = LinkedListNode(val)
tail = head
else:
node = LinkedListNode(val)
tail.next = node
tail = tail.next
def removeNodes(list, x):
    """Return the list with every node whose value exceeds ``x`` removed.

    ``list`` is the head node (may be ``None``); the parameter name shadows
    the builtin but is kept for interface compatibility.  Surviving nodes
    keep their relative order; the new head is returned and may be ``None``
    when every node is removed.
    """
    if list is None or x is None:  # was "== None"; identity test is correct
        return None
    head = list
    # Advance past leading nodes that must go.  Guard against running off
    # the end: the original crashed with AttributeError ("None has no
    # attribute 'val'") whenever *every* value exceeded x.
    while head is not None and head.val > x:
        head = head.next
    prev, curr = None, head
    while curr is not None:
        if curr.val > x:
            # Unlink; prev is never None here because the scan above
            # guarantees the first kept node has val <= x.
            prev.next = curr.next
        else:
            prev = curr
        curr = curr.next
    return head
| [
"weng8916@gmail.com"
] | weng8916@gmail.com |
e3ed3147192d7383a11bfd2cd2f5568e9a527d53 | f6c144bc2bb8db67eba17f22bbbfe456446e9d80 | /Tensorflow1.x/RNN/LSTM_sine.py | 1e59bc25abfb5b506f8e29d56d307370787e659b | [] | no_license | zwzwtao/Deep-Learning | 6e6d2297ef3559f3bed251237247298a54cd363f | 8c4c1973dd98edf887fbef22162d90ad2b95a0ae | refs/heads/master | 2020-07-01T11:49:54.514930 | 2019-11-18T13:41:38 | 2019-11-18T13:41:38 | 201,166,604 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
HIDDEN_SIZE = 30            # width of each LSTM layer's hidden state
NUM_LAYERS = 2              # number of stacked LSTM layers
TIMESTEPS = 10              # input window length fed to the network
TRAINING_STEPS = 10000      # number of optimization steps to run
BATCH_SIZE = 32             # training mini-batch size
TRAINING_EXAMPLES = 10000   # number of (window, label) training pairs
TESTING_EXAMPLES = 1000     # number of evaluation pairs
SAMPLE_GAP = 0.01           # spacing between consecutive sine samples
def generate_data(seq):
    """Slice *seq* into (window, next-value) supervised pairs.

    Each sample is the TIMESTEPS values preceding its label, wrapped in an
    extra list so X has shape (samples, 1, TIMESTEPS); each label is the
    single value that immediately follows the window.
    """
    n_samples = len(seq) - TIMESTEPS
    windows = [[seq[start:start + TIMESTEPS]] for start in range(n_samples)]
    targets = [[seq[start + TIMESTEPS]] for start in range(n_samples)]
    return np.array(windows, dtype=np.float32), np.array(targets, dtype=np.float32)
# Generate training and test data: training samples cover [0, test_start)
# of the sine curve and test samples the interval that follows, so the two
# sets never overlap.
test_start = (TRAINING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP
test_end = test_start + (TESTING_EXAMPLES + TIMESTEPS) * SAMPLE_GAP
train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAINING_EXAMPLES + TIMESTEPS, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TESTING_EXAMPLES + TIMESTEPS, dtype=np.float32)))
def lstm_model(X, y, is_training):
    """Build the stacked-LSTM regression graph.

    Returns (predictions, loss, train_op, output); loss and train_op are
    None when is_training is False, so evaluation can reuse the weights
    without constructing the optimizer.
    """
    cell = tf.nn.rnn_cell.MultiRNNCell([
        tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
        for _ in range(NUM_LAYERS)])
    outputs, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    # NOTE: keras.layers.RNN(cell, X, dtype=tf.float32) was tried here and
    # failed with an unsolved error in the original; kept for reference.
    # Keep only the hidden state of the last time step.
    output = outputs[:, -1, :]
    # Regress that state onto a single scalar (no activation).
    predictions = tf.contrib.layers.fully_connected(
        output, 1, activation_fn=None)
    if not is_training:
        return predictions, None, None, output
    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.train.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)
    return predictions, loss, train_op, output
def run_eval(sess, test_X, test_y):
    """Predict the test set one sample at a time, print RMSE and plot curves."""
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()
    # Reuse the variables created under the "model" scope by the training graph.
    with tf.variable_scope("model", reuse=True):
        prediction, _, _, output = lstm_model(X, [0.0], False)
        print("output", output)
    predictions = []
    labels = []
    # Pull one (prediction, label) pair per iterator step.
    for i in range(TESTING_EXAMPLES):
        p, l = sess.run([prediction, y])
        predictions.append(p)
        labels.append(l)
    print(predictions)
    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))
    print("Root Mean Square Error is: %f" % rmse)
    # Overlay predictions against the true sine values.
    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show()
# Training input pipeline: shuffle and batch the training pairs, repeating
# indefinitely (TRAINING_STEPS bounds the loop below).
ds = tf.data.Dataset.from_tensor_slices((train_X, train_y))
ds = ds.repeat().shuffle(1000).batch(BATCH_SIZE)
X, y = ds.make_one_shot_iterator().get_next()

# Build the training graph under a named scope so run_eval can reuse it.
with tf.variable_scope("model"):
    _, loss, train_op, output = lstm_model(X, y, True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("Evaluate model before training.")
    run_eval(sess, test_X, test_y)
    for i in range(TRAINING_STEPS):
        _, l = sess.run([train_op, loss])
        if i % 1000 == 0:
            print("train step: " + str(i) + ", loss: " + str(l))
    print("Evaluate model after training.")
    run_eval(sess, test_X, test_y)
| [
"zwtao99@outlook.com"
] | zwtao99@outlook.com |
c0aae764cd16c28afd1321ff20eb42f51d14ae5e | c8370b9a146318665cd7a579432e0bb70e816edb | /src/sets.py | a93e94b21d47d432efdfcab58962727eb756e6ee | [] | no_license | Leporoni/PythonBasic | 18fb007b266ce7048b7587e6e010186d26333a54 | d1b7cbb7f0836dc9d96bf6afa65827ed1cf08159 | refs/heads/master | 2020-03-28T09:42:38.191350 | 2018-09-09T18:26:39 | 2018-09-09T18:26:39 | 148,053,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | set_1 = set("Alessando de Andrade Rocha")
# A set built from a string keeps one entry per distinct character.
print(set_1)
print(len(set_1))
# "A" is already a member (first letter of the source string), so this
# add is a no-op and the printed set is unchanged.
set_1.add("A")
print(set_1)
set_wepons = set(["revolver", "pistol", "rifle"])
print(set_wepons)
set_wepons.add("Bazooka")
print(set_wepons)
print("===================================")
# Two equivalent ways to build the same set; the literal form immediately
# replaces the constructor-built one, so the first assignment is redundant.
s_1 = set([1, 2, 3])
s_1 = {1, 2, 3}
print(type(s_1))
print(s_1)
| [
"leporoni@hotmail.com"
] | leporoni@hotmail.com |
0e1e5964dffa6e5368ee64dad1fabc897c970716 | a4016e3a099af344cbb07eff1a9268b621305a10 | /hackerrank/power-calculation.py | 426830510e67af731899dffd21ada79f3355ff4e | [
"MIT"
] | permissive | btjanaka/algorithm-problems | 4d92ae091676a90f5a8cb5a0b6b8c65e34946aee | e3df47c18451802b8521ebe61ca71ee348e5ced7 | refs/heads/master | 2021-06-17T22:10:01.324863 | 2021-02-09T11:29:35 | 2021-02-09T11:29:35 | 169,714,578 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # Author: btjanaka (Bryon Tjanaka)
# Problem: (HackerRank) power-calculation
# Title: Power Calculation
# Link: https://www.hackerrank.com/challenges/power-calculation/problem
# Idea: Since the question asks for the last two digits, we need to mod the
# final answer by 100. We can distribute this mod 100 over the entire summation
# using various mod rules. Furthermore, we realize that
# k^n mod 100 = (k mod 100)^n mod 100, which means that we only have 100 unique
# bases (0-99) that we need to raise to the power of n. We can multiply the sum
# of the powers of those unique bases as appropriate.
# Difficulty: medium
# Tags: math
for _ in range(int(input())):
    k, n = map(int, input().split())
    # prefix[i] = (0^n + 1^n + ... + i^n) mod 100 — only the bases 0..99
    # matter because b^n mod 100 == (b mod 100)^n mod 100.
    prefix = [0] * 100
    running = 0
    for b in range(1, 100):
        running = (running + pow(b, n, 100)) % 100
        prefix[b] = running
    # k terms = full_cycles complete runs of 0..99 plus a partial run 0..leftover.
    full_cycles, leftover = divmod(k, 100)
    total = ((full_cycles % 100) * prefix[99] + prefix[leftover]) % 100
    print(f"{total:02d}")
| [
"bryon@btjanaka.net"
] | bryon@btjanaka.net |
8d2c00758c2d8546a5fcb8b20230b14ba1ecb838 | 0ca34c2fcbb8ad65ea44ffd1f3c81793f61a76aa | /serial_checkYellow.py | 9b87b488fb4ecc536bd572a28abd74d793ac3e0d | [] | no_license | mikebuilder/rbe3001rbp | 63ab5f51931c70bb3c5d6f162850db9310cede8b | 230cbe9ca7cf8193dff141162d92223387426897 | refs/heads/master | 2020-04-01T14:47:25.494044 | 2018-10-12T22:09:16 | 2018-10-12T22:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
#!/usr/bin/env python
import time
import serial
ser = serial.Serial(
port='/dev/serial0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
time.sleep(0.3)
ser.write('16e')
time.sleep(0.1)
returnData = ser.read()
print(returnData)
ser.close()
| [
"nbjohnson10@gmail.com"
] | nbjohnson10@gmail.com |
59812772783380dd6340412af14255fc7cbb7fdc | 9745f847ff7606d423918fdf4c7135d930a48181 | /peering/migrations/0001_v1.0.0.py | 9217c861c8e8d871d51b04d9dca8ee224fa82471 | [
"Apache-2.0"
] | permissive | mxhob1/peering-manager | 097167707e499307632ffeaaba72b381a4290347 | 6c15aacdef5ed267d2602fb313eee8ee8a11149a | refs/heads/master | 2021-05-18T23:26:44.553331 | 2020-07-20T06:35:22 | 2020-07-20T06:35:22 | 252,051,461 | 1 | 0 | Apache-2.0 | 2020-04-02T06:18:48 | 2020-04-01T02:30:46 | Python | UTF-8 | Python | false | false | 48,334 | py | # Generated by Django 2.2.7 on 2019-11-13 20:51
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import netfields.fields
import peering.fields
import taggit.managers
import utils.validators
class Migration(migrations.Migration):
def forward_transition_from_none_to_zero(apps, schema_editor):
models = {
"AutonomousSystem": {
"filters": {"ipv4_max_prefixes": None, "ipv6_max_prefixes": None},
"updates": {"ipv4_max_prefixes": 0, "ipv6_max_prefixes": 0},
},
"DirectPeeringSession": {
"filters": {
"advertised_prefix_count": None,
"received_prefix_count": None,
},
"updates": {"advertised_prefix_count": 0, "received_prefix_count": 0},
},
"InternetExchange": {
"filters": {"peeringdb_id": None},
"updates": {"peeringdb_id": 0},
},
"InternetExchangePeeringSession": {
"filters": {
"advertised_prefix_count": None,
"received_prefix_count": None,
},
"updates": {"advertised_prefix_count": 0, "received_prefix_count": 0},
},
}
db_alias = schema_editor.connection.alias
for key, value in models.items():
model = apps.get_model("peering", key)
model.objects.using(db_alias).filter(**value["filters"]).update(
**value["updates"]
)
def reverse_transition_from_none_to_zero(apps, schema_editor):
models = {
"AutonomousSystem": {
"filters": {"ipv4_max_prefixes": 0, "ipv6_max_prefixes": 0},
"updates": {"ipv4_max_prefixes": None, "ipv6_max_prefixes": None},
},
"DirectPeeringSession": {
"filters": {"advertised_prefix_count": 0, "received_prefix_count": 0},
"updates": {
"advertised_prefix_count": None,
"received_prefix_count": None,
},
},
"InternetExchange": {
"filters": {"peeringdb_id": 0},
"updates": {"peeringdb_id": None},
},
"InternetExchangePeeringSession": {
"filters": {"advertised_prefix_count": 0, "received_prefix_count": 0},
"updates": {
"advertised_prefix_count": None,
"received_prefix_count": None,
},
},
}
db_alias = schema_editor.connection.alias
for key, value in models:
model = apps.get_model("peering", key)
for field in value:
model.objects.using(db_alias).filter(**value["filters"]).update(
**value["updates"]
)
def forward_transition_from_minus_one_to_zero(apps, schema_editor):
models = {
"AutonomousSystem": {
"filters": {"ipv4_max_prefixes": -1, "ipv6_max_prefixes": -1},
"updates": {"ipv4_max_prefixes": 0, "ipv6_max_prefixes": 0},
},
"DirectPeeringSession": {
"filters": {"advertised_prefix_count": -1, "received_prefix_count": -1},
"updates": {"advertised_prefix_count": 0, "received_prefix_count": 0},
},
"InternetExchange": {
"filters": {"peeringdb_id": -1},
"updates": {"peeringdb_id": 0},
},
"InternetExchangePeeringSession": {
"filters": {"advertised_prefix_count": -1, "received_prefix_count": -1},
"updates": {"advertised_prefix_count": 0, "received_prefix_count": 0},
},
}
db_alias = schema_editor.connection.alias
for key, value in models.items():
model = apps.get_model("peering", key)
model.objects.using(db_alias).filter(**value["filters"]).update(
**value["updates"]
)
def reverse_transition_from_minus_one_to_zero(apps, schema_editor):
models = {
"AutonomousSystem": {
"filters": {"ipv4_max_prefixes": 0, "ipv6_max_prefixes": 0},
"updates": {"ipv4_max_prefixes": -1, "ipv6_max_prefixes": -1},
},
"DirectPeeringSession": {
"filters": {"advertised_prefix_count": 0, "received_prefix_count": 0},
"updates": {"advertised_prefix_count": -1, "received_prefix_count": -1},
},
"InternetExchange": {
"filters": {"peeringdb_id": 0},
"updates": {"peeringdb_id": -1},
},
"InternetExchangePeeringSession": {
"filters": {"advertised_prefix_count": 0, "received_prefix_count": 0},
"updates": {"advertised_prefix_count": -1, "received_prefix_count": -1},
},
}
db_alias = schema_editor.connection.alias
for key, value in models:
model = apps.get_model("peering", key)
for field in value:
model.objects.using(db_alias).filter(**value["filters"]).update(
**value["updates"]
)
def forward_transition_from_none_to_empty_list(apps, schema_editor):
AutonomousSystem = apps.get_model("peering", "AutonomousSystem")
db_alias = schema_editor.connection.alias
AutonomousSystem.objects.using(db_alias).filter(
potential_internet_exchange_peering_sessions=None
).update(potential_internet_exchange_peering_sessions=[])
def reverse_transition_from_none_to_empty_list(apps, schema_editor):
AutonomousSystem = apps.get_model("peering", "AutonomousSystem")
db_alias = schema_editor.connection.alias
AutonomousSystem.objects.using(db_alias).filter(
potential_internet_exchange_peering_sessions=[]
).update(potential_internet_exchange_peering_sessions=None)
    def add_permissions(apps, schema_editor):
        # Intentional forward no-op.  NOTE(review): presumably the custom
        # permissions are created by Django's post-migrate handling of the
        # Meta.permissions options set elsewhere in this migration — only the
        # reverse direction (remove_permissions) needs explicit cleanup.
        pass
def remove_permissions(apps, schema_editor):
"""Reverse the above additions of permissions."""
ContentType = apps.get_model("contenttypes.ContentType")
Permission = apps.get_model("auth.Permission")
try:
content_type = ContentType.objects.get(
model="internetexchange", app_label="peering"
)
Permission.objects.filter(
content_type=content_type,
codename__in=("view_configuration", "deploy_configuration"),
).delete()
except ContentType.DoesNotExist:
pass
replaces = [
("peering", "0001_initial"),
("peering", "0002_auto_20170820_1809"),
("peering", "0003_auto_20170903_1235"),
("peering", "0004_auto_20171004_2323"),
("peering", "0005_auto_20171014_1427"),
("peering", "0006_auto_20171017_1917"),
("peering", "0007_auto_20171202_1900"),
("peering", "0008_auto_20171212_2251"),
("peering", "0009_auto_20171226_1550"),
("peering", "0010_auto_20171228_0158"),
("peering", "0011_auto_20180329_2146"),
("peering", "0012_auto_20180502_1733"),
("peering", "0013_auto_20180505_1545"),
("peering", "0014_auto_20180519_2128"),
("peering", "0015_peeringsession_password"),
("peering", "0016_auto_20180726_1307"),
("peering", "0017_auto_20180802_2309"),
("peering", "0018_auto_20181014_1612"),
("peering", "0019_router_netbox_device_id"),
("peering", "0020_auto_20181105_0850"),
("peering", "0021_auto_20181113_2136"),
("peering", "0022_auto_20181116_2226"),
("peering", "0023_auto_20181208_2202"),
("peering", "0024_auto_20181212_2106"),
("peering", "0025_auto_20181212_2322"),
(
"peering",
"0026_autonomoussystem_potential_internet_exchange_peering_sessions",
),
("peering", "0027_auto_20190105_1600"),
("peering", "0028_internetexchangepeeringsession_is_router_server"),
("peering", "0029_auto_20190114_2141"),
("peering", "0030_directpeeringsession_router"),
("peering", "0031_auto_20190227_2210"),
("peering", "0032_auto_20190302_1415"),
("peering", "0033_router_encrypt_passwords"),
("peering", "0034_auto_20190308_1954"),
("peering", "0035_auto_20190311_2334"),
("peering", "0036_auto_20190411_2209"),
("peering", "0037_auto_20190412_2102"),
("peering", "0038_auto_20190412_2233"),
("peering", "0039_routingpolicy_address_family"),
("peering", "0040_auto_20190417_1851"),
("peering", "0041_auto_20190430_1743"),
("peering", "0042_auto_20190509_1439"),
("peering", "0043_router_use_netbox"),
("peering", "0044_auto_20190513_2153"),
("peering", "0045_auto_20190514_2308"),
("peering", "0046_auto_20190608_2215"),
("peering", "0047_auto_20190619_1434"),
("peering", "0048_auto_20190707_1854"),
("peering", "0049_auto_20190731_1946"),
("peering", "0050_auto_20190806_2159"),
("peering", "0051_auto_20190818_1816"),
("peering", "0052_auto_20190818_1926"),
("peering", "0053_auto_20190921_2000"),
("peering", "0054_auto_20191031_2241"),
("peering", "0055_auto_20191110_1312"),
]
initial = True
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
("utils", "0001_v1.0.0"),
]
operations = [
migrations.CreateModel(
name="AutonomousSystem",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("asn", peering.fields.ASNField(unique=True)),
("name", models.CharField(max_length=128)),
("comment", models.TextField(blank=True)),
(
"ipv6_max_prefixes",
models.PositiveIntegerField(blank=True, null=True),
),
(
"ipv4_max_prefixes",
models.PositiveIntegerField(blank=True, null=True),
),
("updated", models.DateTimeField(auto_now=True, null=True)),
("irr_as_set", models.CharField(blank=True, max_length=255, null=True)),
("ipv4_max_prefixes_peeringdb_sync", models.BooleanField(default=True)),
("ipv6_max_prefixes_peeringdb_sync", models.BooleanField(default=True)),
("irr_as_set_peeringdb_sync", models.BooleanField(default=True)),
("created", models.DateTimeField(auto_now_add=True, null=True)),
],
options={"ordering": ["asn"]},
),
migrations.CreateModel(
name="ConfigurationTemplate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128)),
("template", models.TextField()),
("updated", models.DateTimeField(auto_now=True, null=True)),
("comment", models.TextField(blank=True)),
("created", models.DateTimeField(auto_now_add=True, null=True)),
],
options={"ordering": ["name"]},
),
migrations.CreateModel(
name="Router",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128)),
("hostname", models.CharField(max_length=256)),
(
"platform",
models.CharField(
blank=True,
choices=[
("junos", "Juniper JUNOS"),
("iosxr", "Cisco IOS-XR"),
("ios", "Cisco IOS"),
("nxos", "Cisco NX-OS"),
("eos", "Arista EOS"),
(None, "Other"),
],
help_text="The router platform, used to interact with it",
max_length=50,
),
),
("comment", models.TextField(blank=True)),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
(
"netbox_device_id",
models.PositiveIntegerField(blank=True, default=0),
),
],
options={"ordering": ["name"]},
),
migrations.CreateModel(
name="Community",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128)),
("value", peering.fields.CommunityField(max_length=50)),
("comment", models.TextField(blank=True)),
(
"type",
models.CharField(
choices=[("egress", "Egress"), ("ingress", "Ingress")],
default="ingress",
max_length=50,
),
),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
],
options={"verbose_name_plural": "communities", "ordering": ["name"]},
),
migrations.CreateModel(
name="RoutingPolicy",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
("name", models.CharField(max_length=128)),
("slug", models.SlugField(unique=True)),
(
"type",
models.CharField(
choices=[
("import-policy", "Import"),
("export-policy", "Export"),
],
default="import-policy",
max_length=50,
),
),
("comment", models.TextField(blank=True)),
],
options={"verbose_name_plural": "routing policies", "ordering": ["name"]},
),
migrations.CreateModel(
name="InternetExchange",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128)),
("slug", models.SlugField(unique=True)),
("comment", models.TextField(blank=True)),
(
"configuration_template",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.ConfigurationTemplate",
),
),
("ipv4_address", models.GenericIPAddressField(blank=True, null=True)),
("ipv6_address", models.GenericIPAddressField(blank=True, null=True)),
(
"router",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.Router",
),
),
(
"communities",
models.ManyToManyField(blank=True, to="peering.Community"),
),
("peeringdb_id", models.PositiveIntegerField(blank=True, null=True)),
("check_bgp_session_states", models.BooleanField(default=False)),
(
"bgp_session_states_update",
models.DateTimeField(blank=True, null=True),
),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
(
"export_routing_policies",
models.ManyToManyField(
blank=True,
related_name="internetexchange_export_routing_policies",
to="peering.RoutingPolicy",
),
),
(
"import_routing_policies",
models.ManyToManyField(
blank=True,
related_name="internetexchange_import_routing_policies",
to="peering.RoutingPolicy",
),
),
],
options={"ordering": ["name"]},
),
migrations.CreateModel(
name="DirectPeeringSession",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
("ip_address", models.GenericIPAddressField()),
("password", models.CharField(blank=True, max_length=255, null=True)),
("enabled", models.BooleanField(default=True)),
(
"bgp_state",
models.CharField(
blank=True,
choices=[
("idle", "Idle"),
("connect", "Connect"),
("active", "Active"),
("opensent", "OpenSent"),
("openconfirm", "OpenConfirm"),
("established", "Established"),
],
max_length=50,
null=True,
),
),
(
"received_prefix_count",
models.PositiveIntegerField(blank=True, null=True),
),
(
"advertised_prefix_count",
models.PositiveIntegerField(blank=True, null=True),
),
("comment", models.TextField(blank=True)),
("local_asn", peering.fields.ASNField(default=0)),
(
"relationship",
models.CharField(
choices=[
("private-peering", "Private Peering"),
("transit-provider", "Transit Provider"),
("customer", "Customer"),
],
help_text="Relationship with the remote peer.",
max_length=50,
),
),
(
"autonomous_system",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="peering.AutonomousSystem",
),
),
("last_established_state", models.DateTimeField(blank=True, null=True)),
(
"export_routing_policies",
models.ManyToManyField(
blank=True,
related_name="directpeeringsession_export_routing_policies",
to="peering.RoutingPolicy",
),
),
(
"import_routing_policies",
models.ManyToManyField(
blank=True,
related_name="directpeeringsession_import_routing_policies",
to="peering.RoutingPolicy",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="InternetExchangePeeringSession",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("ip_address", models.GenericIPAddressField()),
("comment", models.TextField(blank=True)),
(
"autonomous_system",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="peering.AutonomousSystem",
),
),
(
"internet_exchange",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="peering.InternetExchange",
),
),
("enabled", models.BooleanField(default=True)),
(
"bgp_state",
models.CharField(
blank=True,
choices=[
("idle", "Idle"),
("connect", "Connect"),
("active", "Active"),
("opensent", "OpenSent"),
("openconfirm", "OpenConfirm"),
("established", "Established"),
],
max_length=50,
null=True,
),
),
(
"advertised_prefix_count",
models.PositiveIntegerField(blank=True, null=True),
),
(
"received_prefix_count",
models.PositiveIntegerField(blank=True, null=True),
),
("password", models.CharField(blank=True, max_length=255, null=True)),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
("last_established_state", models.DateTimeField(blank=True, null=True)),
(
"export_routing_policies",
models.ManyToManyField(
blank=True,
related_name="internetexchangepeeringsession_export_routing_policies",
to="peering.RoutingPolicy",
),
),
(
"import_routing_policies",
models.ManyToManyField(
blank=True,
related_name="internetexchangepeeringsession_import_routing_policies",
to="peering.RoutingPolicy",
),
),
],
),
migrations.RunPython(
code=forward_transition_from_none_to_zero,
reverse_code=reverse_transition_from_none_to_zero,
),
migrations.AlterField(
model_name="autonomoussystem",
name="ipv4_max_prefixes",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="autonomoussystem",
name="ipv6_max_prefixes",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="directpeeringsession",
name="advertised_prefix_count",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="directpeeringsession",
name="received_prefix_count",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="internetexchange",
name="peeringdb_id",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="internetexchangepeeringsession",
name="advertised_prefix_count",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name="internetexchangepeeringsession",
name="received_prefix_count",
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.RunPython(
code=forward_transition_from_minus_one_to_zero,
reverse_code=reverse_transition_from_minus_one_to_zero,
),
migrations.AddField(
model_name="autonomoussystem",
name="potential_internet_exchange_peering_sessions",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.GenericIPAddressField(),
blank=True,
default=list,
size=None,
),
),
migrations.RunPython(
code=forward_transition_from_none_to_empty_list,
reverse_code=reverse_transition_from_none_to_empty_list,
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="is_route_server",
field=models.BooleanField(blank=True, default=False),
),
migrations.AlterModelOptions(
name="internetexchange",
options={
"ordering": ["name"],
"permissions": [
(
"view_configuration",
"Can view Internet Exchange's configuration",
),
(
"deploy_configuration",
"Can deploy Internet Exchange's configuration",
),
],
},
),
migrations.AddField(
model_name="directpeeringsession",
name="router",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.Router",
),
),
migrations.AlterModelOptions(
name="directpeeringsession",
options={"ordering": ["autonomous_system", "ip_address"]},
),
migrations.AlterModelOptions(
name="internetexchangepeeringsession",
options={"ordering": ["autonomous_system", "ip_address"]},
),
migrations.AlterField(
model_name="router",
name="platform",
field=models.CharField(
blank=True,
choices=[
("junos", "Juniper JUNOS"),
("iosxr", "Cisco IOS-XR"),
("ios", "Cisco IOS"),
("nxos", "Cisco NX-OS"),
("eos", "Arista EOS"),
("", "Other"),
],
help_text="The router platform, used to interact with it",
max_length=50,
),
),
migrations.AddField(
model_name="router",
name="encrypt_passwords",
field=models.BooleanField(
blank=True,
default=True,
help_text="Try to encrypt passwords in router's configuration",
),
),
migrations.AlterModelOptions(
name="routingpolicy",
options={
"ordering": ["-weight", "name"],
"verbose_name_plural": "routing policies",
},
),
migrations.AddField(
model_name="routingpolicy",
name="weight",
field=models.PositiveSmallIntegerField(
default=0, help_text="The higher the number, the higher the priority"
),
),
migrations.AlterField(
model_name="routingpolicy",
name="type",
field=models.CharField(
choices=[
("export-policy", "Export"),
("import-policy", "Import"),
("import-export-policy", "Import and Export"),
],
default="import-policy",
max_length=50,
),
),
migrations.AddField(
model_name="autonomoussystem",
name="contact_email",
field=models.EmailField(
blank=True, max_length=254, verbose_name="Contact E-mail"
),
),
migrations.AddField(
model_name="autonomoussystem",
name="contact_name",
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name="autonomoussystem",
name="contact_phone",
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name="routingpolicy",
name="address_family",
field=models.PositiveSmallIntegerField(
choices=[(0, "All"), (4, "IPv4"), (6, "IPv6")], default=0
),
),
migrations.AlterField(
model_name="autonomoussystem",
name="potential_internet_exchange_peering_sessions",
field=django.contrib.postgres.fields.ArrayField(
base_field=netfields.fields.InetAddressField(max_length=39),
blank=True,
default=list,
size=None,
),
),
migrations.AlterField(
model_name="directpeeringsession",
name="ip_address",
field=netfields.fields.InetAddressField(max_length=39),
),
migrations.AlterField(
model_name="internetexchange",
name="ipv4_address",
field=netfields.fields.InetAddressField(
blank=True,
max_length=39,
null=True,
validators=[utils.validators.AddressFamilyValidator(4)],
),
),
migrations.AlterField(
model_name="internetexchange",
name="ipv6_address",
field=netfields.fields.InetAddressField(
blank=True,
max_length=39,
null=True,
validators=[utils.validators.AddressFamilyValidator(6)],
),
),
migrations.AlterField(
model_name="internetexchangepeeringsession",
name="ip_address",
field=netfields.fields.InetAddressField(max_length=39),
),
migrations.AlterField(
model_name="routingpolicy",
name="type",
field=models.CharField(
choices=[
("export-policy", "Export"),
("import-policy", "Import"),
("import-export-policy", "Import+Export"),
],
default="import-policy",
max_length=50,
),
),
migrations.AddField(
model_name="directpeeringsession",
name="multihop_ttl",
field=peering.fields.TTLField(
blank=True,
default=1,
help_text="Use a value greater than 1 for BGP multihop sessions",
verbose_name="Multihop TTL",
),
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="multihop_ttl",
field=peering.fields.TTLField(
blank=True,
default=1,
help_text="Use a value greater than 1 for BGP multihop sessions",
verbose_name="Multihop TTL",
),
),
migrations.AddField(
model_name="router",
name="use_netbox",
field=models.BooleanField(
blank=True,
default=False,
help_text="Use NetBox to communicate instead of NAPALM",
),
),
migrations.AddField(
model_name="autonomoussystem",
name="export_routing_policies",
field=models.ManyToManyField(
blank=True,
related_name="autonomoussystem_export_routing_policies",
to="peering.RoutingPolicy",
),
),
migrations.AddField(
model_name="autonomoussystem",
name="import_routing_policies",
field=models.ManyToManyField(
blank=True,
related_name="autonomoussystem_import_routing_policies",
to="peering.RoutingPolicy",
),
),
migrations.CreateModel(
name="BGPGroup",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created", models.DateTimeField(auto_now_add=True, null=True)),
("updated", models.DateTimeField(auto_now=True, null=True)),
("name", models.CharField(max_length=128)),
("slug", models.SlugField(max_length=255, unique=True)),
("comments", models.TextField(blank=True)),
(
"communities",
models.ManyToManyField(blank=True, to="peering.Community"),
),
(
"export_routing_policies",
models.ManyToManyField(
blank=True,
related_name="bgpgroup_export_routing_policies",
to="peering.RoutingPolicy",
),
),
(
"import_routing_policies",
models.ManyToManyField(
blank=True,
related_name="bgpgroup_import_routing_policies",
to="peering.RoutingPolicy",
),
),
(
"bgp_session_states_update",
models.DateTimeField(blank=True, null=True),
),
("check_bgp_session_states", models.BooleanField(default=False)),
(
"tags",
taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
],
options={"verbose_name": "BGP group", "ordering": ["name"]},
),
migrations.AddField(
model_name="directpeeringsession",
name="bgp_group",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.BGPGroup",
verbose_name="BGP Group",
),
),
migrations.AddField(
model_name="router",
name="configuration_template",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.ConfigurationTemplate",
),
),
migrations.AlterModelOptions(
name="router",
options={
"ordering": ["name"],
"permissions": [
("view_configuration", "Can view router's configuration"),
("deploy_configuration", "Can deploy router's configuration"),
],
},
),
migrations.AlterField(
model_name="router",
name="encrypt_passwords",
field=models.BooleanField(
blank=True,
default=False,
help_text="Try to encrypt passwords for peering sessions",
),
),
migrations.AlterField(
model_name="internetexchange",
name="slug",
field=models.SlugField(max_length=255, unique=True),
),
migrations.AlterField(
model_name="routingpolicy",
name="slug",
field=models.SlugField(max_length=255, unique=True),
),
migrations.RenameModel(old_name="ConfigurationTemplate", new_name="Template"),
migrations.AddField(
model_name="template",
name="type",
field=models.CharField(
choices=[("configuration", "Configuration"), ("email", "E-mail")],
default="configuration",
max_length=50,
),
),
migrations.AlterField(
model_name="internetexchange",
name="configuration_template",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.Template",
),
),
migrations.AlterField(
model_name="router",
name="configuration_template",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="peering.Template",
),
),
migrations.AlterModelOptions(
name="community",
options={
"ordering": ["value", "name"],
"verbose_name_plural": "communities",
},
),
migrations.AddField(
model_name="directpeeringsession",
name="local_ip_address",
field=netfields.fields.InetAddressField(
blank=True, max_length=39, null=True
),
),
migrations.AddField(
model_name="autonomoussystem",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="community",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="directpeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchange",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="router",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="routingpolicy",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="template",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.RenameField(
model_name="autonomoussystem", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="community", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="directpeeringsession", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="internetexchange", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="internetexchangepeeringsession",
old_name="comment",
new_name="comments",
),
migrations.RenameField(
model_name="router", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="routingpolicy", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="template", old_name="comment", new_name="comments"
),
migrations.AlterModelOptions(name="internetexchange", options={}),
migrations.RemoveField(
model_name="internetexchange", name="configuration_template"
),
migrations.RunPython(code=add_permissions, reverse_code=remove_permissions),
migrations.AlterField(
model_name="autonomoussystem",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="bgpgroup",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="community",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="directpeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="internetexchange",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="internetexchangepeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="router",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="routingpolicy",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterField(
model_name="template",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.TaggedItem",
to="utils.Tag",
verbose_name="Tags",
),
),
migrations.AlterModelOptions(
name="autonomoussystem",
options={
"ordering": ["asn"],
"permissions": [("send_email", "Can send e-mails to AS contact")],
},
),
migrations.AddField(
model_name="directpeeringsession",
name="encrypted_password",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="encrypted_password",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"guillaume@mazoyer.eu"
] | guillaume@mazoyer.eu |
e9f0d477d5711458494355bce81a598338b905cb | 300e52faf63604bfb7ec81b9a73da853e71e36c6 | /blog/migrations/0001_initial.py | de21b336f6961767703ed4fa9218ccb186a78332 | [] | no_license | ivan-placid/my-2nd-blog | 8e7fb9b5c47aaff4437b1d4f0acb31f556e2924d | 56604079fd79f2d8b3622d7ffe1d8b53605b445e | refs/heads/master | 2021-01-11T05:33:42.664633 | 2016-10-26T11:31:27 | 2016-10-26T11:31:27 | 71,799,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-24 16:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the blog app: creates the Post model."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Post.author points at the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                # Unique slug used for post URLs.
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('content', models.TextField()),
                # Defaults to the creation time; timezone-aware via django.utils.timezone.
                ('date_added', models.DateTimeField(default=django.utils.timezone.now)),
                # Deleting a user deletes their posts (CASCADE).
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-date_added',),
            },
        ),
    ]
| [
"ivan.placid@gmail.com"
] | ivan.placid@gmail.com |
b9d1e76ff332e8251bd81fc607e11351f3a4d2ca | b044ca9aff959ed333979695be7222f5c2b6d5e4 | /study_2018/20180612/6.py | a92b5c908200300d03727a1f7d987593d296148f | [] | no_license | waterwx/study_2018 | 5c19ca9cd523fdff0136a0dd95a9618cf799452b | 01b9c4b1fa8429659e18ed4da10105ef507e6b13 | refs/heads/master | 2020-03-19T14:28:05.292926 | 2018-06-28T14:25:24 | 2018-06-28T14:25:24 | 136,624,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | jflajdkflajo
fjlkajsdflka
jffffllllajdslff;
jjjlll
ffflll
| [
"406726179@qq.com"
] | 406726179@qq.com |
95d7bc01e08a48603913adb10222767d70c2cb9c | 4ca053209b44a0dc108d23183910b4069704ee5b | /lofaro_light.py | a03fa67f4bcb12f1152910d2e0dd97aa5bad7890 | [] | no_license | thedancomplex/autonomous-lights-lofaro-lights | 15e69c080d59a0877e6f40b69e29b5b3008943f4 | 49039a8797618331820b5482c017864c902a0655 | refs/heads/master | 2022-12-12T21:22:31.944036 | 2020-09-05T01:39:31 | 2020-09-05T01:39:31 | 292,980,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | #!/usr/bin/python3
from pyHS100 import SmartBulb
from pprint import pformat as pf
import random as r
import time as t
#bulb = SmartBulb("10.66.3.17")
#print("Hardware: %s" % pf(bulb.hw_info))
#print("Full sysinfo: %s" % pf(bulb.get_sysinfo()))
#bulb.turn_on()
#bulb.hsv = (180, 100, 100)
i = 0
# Main loop: every second, pick a random hue and push it to every bulb on
# the 10.66.{1..3}.{17..24} subnets.
while True:
    # Random hue in [0, 270); saturation and value are pinned at full.
    h = int(r.random() * 270)
    s = 100
    for y in range(1, 4):
        for x in range(17, 25):
            addr = "10.66." + str(y) + "." + str(x)
            print(addr)
            # 10.66.1.24 is deliberately skipped.
            # BUG FIX: was "( x == 24 ) & (y == 1 )" -- bitwise & works on
            # bools here but "and" is the correct boolean operator.
            if x == 24 and y == 1:
                continue
            try:
                b = SmartBulb(addr)
                b.turn_on()
                b.hsv = (h, s, 100)
            except Exception:
                # Best effort: unreachable bulbs are ignored.
                # BUG FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit and made Ctrl-C unreliable.
                pass
    print(i)
    i += 1
    t.sleep(1.0)
| [
"dan@danlofaro.com"
] | dan@danlofaro.com |
e4f9570ca88d4d268cf94d22e8faeb9b81e4d32f | abc5710e8b80b8969e602c53aaa1a5187d2adeee | /wsgi.py | dd77627b6e86bebdd22f0400b4be9a228e04a097 | [] | no_license | mohammad-safakhou/nava | f709356afa2cd2e315183216b6727fe94a15e6e4 | 85b441927d12388fefea1afd228e766e2fd2c233 | refs/heads/master | 2023-06-19T07:31:28.027055 | 2021-07-16T00:42:01 | 2021-07-16T00:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from factory import create_app
from config import ProductionConfig
# WSGI entry point: build the application with production settings.
# NOTE(review): create_app/ProductionConfig come from the project's factory
# and config modules -- their behavior is not visible here.
app = create_app(ProductionConfig)
if __name__ == "__main__":
    # Development fallback: run the built-in server when executed directly.
    app.run()
| [
"mnim220@yahoo.com"
] | mnim220@yahoo.com |
2c72c0fd9afadc5369dafc83a72510e88f785872 | d70a16f353819ff858dbe6974916a936a85a3c0e | /api/migrations/0003_auto_20201217_1941.py | 60ba065342ebb9755ec4749f75c6cf4cc1ac6880 | [] | no_license | mahmud-sajib/FBuzz-Task | 7fa69a35d1dfe069ed48e2956d1eff16cf953c74 | a57bc031911fd7259c68890a953d9d8175246f73 | refs/heads/master | 2023-02-02T05:56:01.208849 | 2020-12-19T07:03:24 | 2020-12-19T07:03:24 | 321,357,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Generated by Django 3.1 on 2020-12-17 13:41
import api.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters CVFileUpload.document to enforce an upload size limit."""

    dependencies = [
        ('api', '0002_auto_20201217_1751'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cvfileupload',
            name='document',
            # Files go under MEDIA_ROOT/documents/ and are checked by the
            # custom validate_file_size validator from api.validators.
            field=models.FileField(upload_to='documents/', validators=[api.validators.validate_file_size]),
        ),
    ]
| [
"shout.mahmud@gmail.com"
] | shout.mahmud@gmail.com |
85f8207c1a52da4c91cfcc22bb76bd8dd60589aa | 40fa413a9ba362ab8cc2474269f83bb87847cda2 | /setup.py | a7a9aee54a8f3d08813d67be79e79b61855eaffc | [] | no_license | Peder2911/leanfeeder | c366563527c6e6b65cf46f8564596d1637337026 | f50ed3845aac21b6eed81eb1ef72c39175c87c8d | refs/heads/master | 2023-01-01T13:55:49.037014 | 2020-10-15T12:02:43 | 2020-10-15T12:02:43 | 301,992,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py |
import setuptools
# Read the long description from the README so PyPI can render it.
with open("README.md") as f:
    long_description = f.read()

setuptools.setup(
    name = "leanfeeder",
    version = "0.0.1",
    author = "Peder G. Landsverk",
    author_email = "pglandsverk@gmail.com",
    description = "Tool for pushing data to a Postgres DB without too much hassle.",
    long_description = long_description,
    # BUG FIX: was "test/markdown"; the valid MIME type understood by PyPI
    # is "text/markdown".
    long_description_content_type = "text/markdown",
    url = "https://www.github.com/peder2911/leanfeeder",
    packages = setuptools.find_packages(),
    scripts = ["bin/leanf"],
    python_requires = ">=3.7",
    install_requires = [
        "strconv>=0.4.0",
        "psycopg2>=2.8.0",
        "fire>=0.3.0",
        "python-dateutil>=2.8.0",
    ])
| [
"pglandsverk@gmail.com"
] | pglandsverk@gmail.com |
205df4acc61ce3ee987006deb9188b1713867ac0 | abbc885208c9fc8dabafe4eb2dcc9ac83954dc62 | /read_rbsProfs.py | 1219ff2222f0f09eea3f188742565ca8c3a7b24c | [] | no_license | xingl/backups | 4406c9d35dea8b3ff47f1ecd36e3587c895b36da | 3130338c1f2fd80da626476699a2a79d5d8ed924 | refs/heads/master | 2021-01-20T22:29:08.115638 | 2016-05-30T03:05:59 | 2016-05-30T03:05:59 | 59,974,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,643 | py | #! /usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from interp import *
def first_derivative(f_in, x_in):
    """Return df/dx on a uniformly spaced grid (spacing taken from the
    first two abscissae).

    Uses first-order one-sided differences at the two endpoints,
    second-order central differences at the points next to them, and
    fourth-order central differences everywhere in the interior.
    """
    xs = x_in.flatten()
    ys = f_in.flatten()
    step = xs[1] - xs[0]
    deriv = np.empty(len(ys))
    # One-sided differences at the boundaries.
    deriv[0] = (ys[1] - ys[0]) / step
    deriv[-1] = (ys[-1] - ys[-2]) / step
    # Second-order central differences next to the boundaries.
    deriv[1] = (ys[2] - ys[0]) / 2. / step
    deriv[-2] = (ys[-1] - ys[-3]) / 2. / step
    # Fourth-order central differences in the interior.
    deriv[2:-2] = (-ys[4:] + 8. * ys[3:-1] - 8. * ys[1:-3] + ys[:-4]) / 12. / step
    return deriv
def rhot2psip(rbsProfs_file_name,flux_surface):
rhot_fs = float(flux_surface)
rbsProfs_data = np.genfromtxt(rbsProfs_file_name)
psip_data_full = rbsProfs_data[:,1]
rhot_data_full = rbsProfs_data[:,0]
ind = np.argmin(abs(rhot_data_full-rhot_fs))
psip_data = rbsProfs_data[ind-10:ind+10,1]
rhot_data = rbsProfs_data[ind-10:ind+10,0]
rhot_psip_spl = interpolate.UnivariateSpline(rhot_data,psip_data)
psip_fs = rhot_psip_spl(rhot_fs)
print 'rhot = ',rhot_fs
print 'psip = ',psip_fs
return psip_fs
def read_rbsProfs(rbsProfs_file_name,flux_surface):
    """Extract outboard-midplane quantities and gradient scale lengths at a
    given flux surface from an rbsProfs table.

    Spline-fits density, ion temperature and q over a 20-point window around
    the requested rho_tor and returns
    (R_obmp, Bp_obmp, |Bt_obmp|, B_tot_obmp, gamE_obmp,
     |tprime|, |fprime|, shat).
    NOTE(review): column meanings below (3=n, 4=Ti, 9=gamE, 23=q, 24=R,
    25=Bp, 26=Bt) are inferred from the variable names -- confirm against
    the rbsProfs file format.
    """
    # Half-width of the local fitting window, in table rows.
    n_spl = 10
    rbsProfs_data = np.genfromtxt(rbsProfs_file_name)
    rhot_fs = float(flux_surface)
    rhot_data_full = rbsProfs_data[:,0]
    # Row closest to the requested flux surface.
    ind = np.argmin(abs(rhot_data_full-rhot_fs))
    psip_data = rbsProfs_data[ind-n_spl:ind+n_spl,1]
    rhot_data = rbsProfs_data[ind-n_spl:ind+n_spl,0]
    #rhot_data_unif = np.linspace(rhot_data[0],rhot_data[-1],len(rhot_data)*1000)
    #psip_data_unif = interp(rhot_data,psip_data,rhot_data_unif)
    R_obmp = rbsProfs_data[ind,24]
    # Density: normalized inverse gradient scale length |n'/n|.
    n_data = rbsProfs_data[ind-n_spl:ind+n_spl,3]
    n_rhot_spl = interpolate.UnivariateSpline(rhot_data,n_data)
    fprime_obmp = abs(n_rhot_spl(rhot_fs,nu=1)/n_rhot_spl(rhot_fs))
    print 'n = ', n_rhot_spl(rhot_fs)
    print 'fprime=',fprime_obmp
    # Ion temperature: |Ti'/Ti|.
    Ti_data = rbsProfs_data[ind-n_spl:ind+n_spl,4]
    Ti_rhot_spl = interpolate.UnivariateSpline(rhot_data,Ti_data)
    tprime_obmp = abs(Ti_rhot_spl(rhot_fs,nu=1)/Ti_rhot_spl(rhot_fs))
    print 'Ti = ', Ti_rhot_spl(rhot_fs)
    print 'tprime=',tprime_obmp
    # Magnetic shear: shat = (dq/drhot)/q * rhot.
    q_data = rbsProfs_data[ind-n_spl:ind+n_spl,23]
    q_rhot_spl = interpolate.UnivariateSpline(rhot_data,q_data)
    shat_obmp = q_rhot_spl(rhot_fs,nu=1)/q_rhot_spl(rhot_fs)*rhot_fs
    print 'q = ',q_rhot_spl(rhot_fs)
    print 'shat=',shat_obmp
    # Point values at the selected row.
    Bp_obmp = rbsProfs_data[ind,25]
    Bt_obmp = rbsProfs_data[ind,26]
    gamE_obmp = rbsProfs_data[ind,9]
    B_tot_obmp = np.sqrt(Bp_obmp**2+Bt_obmp**2)
    return R_obmp,Bp_obmp,abs(Bt_obmp),B_tot_obmp,gamE_obmp,abs(tprime_obmp),abs(fprime_obmp),shat_obmp
| [
"xingliu@physics.utexas.edu"
] | xingliu@physics.utexas.edu |
3f0880e9aeb1dc1e7507225cffb3501d7a092b26 | 2d4dfec7a2b40588da529e774d34c4c5002ee11a | /Scripts/pip3-script.py | 5d6fb03bd799f5bf387d79a32e043525f5413197 | [] | no_license | Bobcat1238/HelloWorld-Python | 54df7e3005c4fed59e1bc9960d33ed453888940c | f8c0201051b1e99eca2e1f043c046991a06ef8cb | refs/heads/master | 2020-12-04T08:40:59.368116 | 2020-01-09T03:11:05 | 2020-01-09T03:11:05 | 231,698,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!C:\Users\gabar\source\Python\HelloWorld\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.py(w)"/".exe" suffix that Windows
    # launchers append, then exit with the return code of pip's console
    # entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"g.barth@live.com"
] | g.barth@live.com |
2f5a559f6f876765e404d8a95601e163f085fcfe | f19cc408729222b0e745c8ee1b947c146a270b67 | /python/mockup/M_verifier.py | 71b654c297fe3d33d5c6af23c07366cde644add6 | [] | no_license | securekim/DID | e6d4353f5764b6933cd3cb0fa1033fd857f31934 | 3cee5847a370c6d76632301cf538da18b3000910 | refs/heads/main | 2023-06-09T10:02:07.304201 | 2021-07-04T20:32:45 | 2021-07-04T20:32:45 | 372,722,882 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,780 | py | # -*- coding: utf-8 -*-
import ed25519
import base64
import base58
import requests
import random
import string
import sys
import json
import re
import bottle
import canister
import time
import requests
from bottle import response, request, HTTPResponse
from multiprocessing import Process
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs
# TCP port this verifier service listens on.
my_port = 3333
#challenge_global = ""
#pubkey_global = ""
# Bottle application with canister plugin installed.
app = bottle.Bottle()
app.install(canister.Canister())
#if len(sys.argv[1:]) < 1:
#    sys.exit("[검증자] 도전받는자의 DID가 필요함)
# Hard-coded DID of the challengee (formerly read from argv, see above).
challengee_did = "did:mtm:ExsNKhvF3pqwDvFaVaiQnWWdyeVwxd"
# DID must be "did:mtm:" followed by exactly 30 base58 characters.
pattern = re.compile("^did:mtm:[a-km-zA-HJ-NP-Z1-9]{30,30}$")
if not pattern.match(challengee_did):
    sys.exit("Invalid DID provided")
universal_resolver_addr = "https://did-resolver.mitum.com/ddo/" # when using a universal resolver
def verifyString(challenge, sigStr, pubkey):
    """Return True iff sigStr is a valid Ed25519 signature of challenge.

    Both pubkey and sigStr are base58-encoded strings.  Any failure
    (bad encoding, malformed key, signature mismatch) yields False.
    """
    try:
        key = ed25519.VerifyingKey(
            base64.b64encode(base58.b58decode(pubkey)),
            encoding="base64")
        key.verify(base58.b58decode(sigStr),
                   challenge.encode("utf8"),
                   encoding=None)
        return True
    except Exception:
        return False
def callback_http(s):
    # Blocking helper: serve the given HTTP server forever (intended as a
    # Process target).
    s.serve_forever()
# challenge generation function
def id_generator(size=32, chars=string.ascii_uppercase + string.digits):
    """Build a random challenge string of `size` characters drawn from `chars`."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def challenging(did):
    """Create a random challenge and resolve the challengee's public key.

    Queries the DID resolver for the DID document; on any failure falls
    back to a hard-coded public key.  Returns (challenge, pubkey_base58).
    """
    challenge = id_generator()
    try:
        # Resolve the DID document and take the first verification key.
        did_req = requests.get("http://49.50.164.195:8080/v1/DIDDocument?did="+did)
        pubkey = json.loads(json.loads(did_req.text)['data'])['verificationMethod'][0]['publicKeyBase58']
    except Exception:
        # Fallback key used when the resolver is unreachable or malformed.
        pubkey = "3rfrZgGZHXpjiGr1m3SKAbZSktYudfJCBsoJm4m1XUgp"
    print("[검증자] 랜덤 생성한 챌린지 컨텐츠 : %s" % challenge)
    print("[검증자][document] 도전받는자의 공개키 : %s" % pubkey)
    # NOTE(review): despite the names, these are plain locals (no "global"
    # declaration) -- callers must use the returned tuple.
    pubkey_global = pubkey
    challenge_global = challenge
    return challenge_global, pubkey_global
@app.get('/challenge')
def challenge():
    """Issue a fresh random challenge for the DID passed as ?did=...

    Stores the challenge and resolved public key in module globals for the
    later /response verification, and replies 202 with the challenge.
    """
    global challenge_global
    global pubkey_global
    try:
        did = request.query['did']
        challenge_global, pubkey_global = challenging(did)
    except Exception:
        # BUG FIX: the module-level name "response" is shadowed by the
        # /response route handler defined below, so the original
        # "response.status = 400" only set an attribute on that function and
        # the client still received HTTP 200.  Raise an explicit 400 instead.
        raise HTTPResponse("Error", status=400)
    print(challenge_global)
    raise HTTPResponse(json.dumps({"payload": challenge_global}), status=202, headers={})
@app.get('/response')
def response():
    """Verify the challengee's signature (?signature=...) against the last
    issued challenge and reply 202 with the boolean result."""
    try:
        signature = request.query['signature']
    except Exception:
        # BUG FIX: this function shadows bottle's imported "response" object,
        # so the original "response.status = 400" silently set an attribute
        # on this function and returned HTTP 200.  Raise a real 400 instead.
        raise HTTPResponse("Error", status=400)
    try:
        challengeRet = verifyString(challenge_global, signature, pubkey_global)
        print("[검증자] 받은 사인 값 : %s" % signature)
    except Exception:
        challengeRet = False
    print("[검증자] 검증 결과 : %s" % challengeRet)
    raise HTTPResponse(json.dumps({"Response": challengeRet}), status=202, headers={})
if __name__ == "__main__":
    #signTest()
    # Serve the challenge/response endpoints on all interfaces.
    app.run(host='0.0.0.0', port=my_port)
#http://172.28.91.165:3333/challenge?did=did:mtm:DTxegdAVdSe9WL1tS7AZ3bEs4dXn1XZnSboP7NRXAjb6
#http://172.28.91.165:3333/response?signature=abcdef
#http://mitum.securekim.com:3333/challenge?did=did:mtm:DTxegdAVdSe9WL1tS7AZ3bEs4dXn1XZnSboP7NRXAjb6
#http://wiggler.securekim.com:3333/response?signature=abcdef | [
"admin@securekim.com"
] | admin@securekim.com |
34c980c93635be5b73ec56d4a12b6d39e402fe13 | b15f6fc21375a2ed06c291a34a5203e2b051056e | /hackerrank codes/30 Days of Code/Day 2.txt | 05a3ba5a53bf677b0a70c0d65d305081c1f8ae8c | [
"MIT"
] | permissive | SoniSakshi1999/HackerRank_Solutions- | e1f942ef6d42fa4273ed1bb75fd7d0e09212404c | fcb2c95081204f5ae49bf27e895df401f36b0dd0 | refs/heads/master | 2022-11-26T05:10:27.329161 | 2020-08-08T10:30:50 | 2020-08-08T10:30:50 | 277,254,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | txt | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
tip = (meal_cost*(tip_percent/100))
tax = (meal_cost*(tax_percent/100))
total_cost = meal_cost + tip + tax
ans = round(total_cost)
print(ans)
if __name__ == '__main__':
    # Read meal cost, tip percent and tax percent from stdin, one per line.
    meal_cost = float(input())
    tip_percent = int(input())
    tax_percent = int(input())
    solve(meal_cost, tip_percent, tax_percent)
| [
"noreply@github.com"
] | SoniSakshi1999.noreply@github.com |
class Que:
    """A simple FIFO queue backed by a list.

    New items are inserted at the front of the backing list, so the oldest
    item sits at the end and is the one removed by pop().  Both mutators
    print the list so callers can watch the state change.
    """

    def __init__(self, contents):
        # Copy the initial contents so the caller's sequence is not shared.
        self._hiddenlist = list(contents)

    def push(self, value):
        """Enqueue value at the front of the backing list."""
        self._hiddenlist.insert(0, value)
        print(self._hiddenlist)

    def pop(self):
        """Dequeue the oldest item (FIFO) or report an empty queue.

        BUG FIX: previously popped index 0, which removed the most recently
        pushed item (stack/LIFO behaviour).  A queue must remove the oldest
        item, which lives at the end of the backing list.
        """
        if len(self._hiddenlist):
            self._hiddenlist.pop()
            print(self._hiddenlist)
        else:
            print("Empty Que")
# Demo: seed the queue, enqueue one value, then dequeue once.
que = Que([1, 2.25, 3.0, 4, 1234.5])
que.push(0)
que.pop()
class Node:
    """Single element of a singly linked list."""
    def __init__(self, dataValue ):
        self.dataValue = dataValue  # payload stored in this node
        self.nextValue = None  # link to the next node; None marks the tail
class Slink:
    """A minimal singly linked list with prepend and print helpers."""

    def __init__(self):
        # Empty list: no head node yet.
        self.headValue = None

    def printLink(self):
        """Print each node's payload from head to tail."""
        node = self.headValue
        while node is not None:
            print (node.dataValue)
            node = node.nextValue

    def atStart(self, newData):
        """Prepend a new node carrying newData; it becomes the head."""
        node = Node(newData)
        node.nextValue = self.headValue
        self.headValue = node
# lis = Slink()
# lis.atStart("Sun")
# lis.atStart("Mon")
# lis.printLink()
class Stack:
    """A LIFO stack backed by a list, seeded with the integer 10."""

    def __init__(self):
        # Note: the stack starts with one element, the integer 10.
        self.stack = [10]

    def push(self, dataValue):
        """Push a value on top and return the backing list."""
        self.stack.append(dataValue)
        return self.stack

    def pop(self):
        """Pop the top value and describe it, or report an empty stack.

        BUG FIX: the popped value was concatenated to a str directly, which
        raised TypeError for non-string items (e.g. the seeded integer 10).
        Convert with str() before concatenating.
        """
        if len(self.stack) <= 0:
            return ("No element in the Stack")
        else:
            return "This value pop =" + str(self.stack.pop())
# stack = Stack()
# stack.push("1")
# stack.push("2")
# stack.push("3")
# print(stack.pop())
# print(stack.push("5"))
| [
"nishant7.ng@gmail.com"
] | nishant7.ng@gmail.com |
b0f30ad19d6e23d4e343b4fd99dc41b0b026118b | 07a0f62a9c99e25d5c348ee812ef1e49c43d2d7b | /test/test_sim/test_propagate.py | fc3c39340425cca7ea4795585814999355648f3c | [
"BSD-3-Clause"
] | permissive | kyuepublic/tomopy | 15b39a8674b6ccfd7c77b177a35dd14f43720f78 | 3f334ad762f8d6ad2108838aa9a620b3694debe2 | refs/heads/master | 2020-12-24T06:51:35.271931 | 2016-11-30T23:00:01 | 2016-11-30T23:00:01 | 67,078,254 | 1 | 0 | null | 2016-08-31T22:32:30 | 2016-08-31T22:32:29 | null | UTF-8 | Python | false | false | 3,844 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from test.util import read_file
from tomopy.sim.propagate import *
import numpy as np
from numpy.testing import assert_array_almost_equal
__author__ = "Doga Gursoy"
__copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
if __name__ == '__main__':
    # Run this module's tests with nose when executed directly; keep the
    # interpreter alive afterwards (exit=False).
    import nose
    nose.runmodule(exit=False)
| [
"sparsedata@gmail.com"
] | sparsedata@gmail.com |
def num(z):
    """Iterative equivalent of the original recursion
    num(z) = z if z < 9 else 9 + 10 * num(z - 9):
    repeatedly peel off 9 while shifting the accumulated digits left.
    """
    total = 0
    scale = 1
    while z >= 9:
        total += 9 * scale
        scale *= 10
        z -= 9
    return total + z * scale
# Read one integer from stdin and print its transformed value.
z = int(input())
print(num(z))
| [
"nikhilvarshney9292@gmail.com"
] | nikhilvarshney9292@gmail.com |
0c7ca209f5437826a7c1527f09d0f27b55e5d412 | ac2f4c7caaf7ccc51ebcb2d88020fb4842b3f493 | /install.py | e20927c124212f7697dea81c1e707305db393903 | [] | no_license | vaikuntht/TAMU-Latex-Styles | 48b89291cb5b65348303cfee4bc8424a61b44adb | 8c1f096bbe3140eef6e14d001fa9d81905a28258 | refs/heads/master | 2021-01-18T00:04:04.722714 | 2014-07-31T18:49:42 | 2014-07-31T18:49:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,686 | py | #!/usr/bin/env python
import os, sys, getopt, argparse, fnmatch, errno, subprocess, tempfile, platform, getpass, pprint, shutil
from subprocess import call
#program name available through the %(prog)s command
#can use prog="" in the ArgumentParser constructor
#can use the type=int option to make the parameters integers
#can use the action='append' option to make a list of options
#can use the default="" option to automatically set a parameter
# Command-line interface.  Note: "-nha/--nohash" uses action="store_false",
# so DOHASH defaults to True and passing the flag *disables* the texhash run.
parser = argparse.ArgumentParser(description="Install the TAMU based LaTex style files.",
                                 epilog="And those are the options available. Deal with it.")
group = parser.add_mutually_exclusive_group()
parser.add_argument("-nha","--nohash", help="Will run texhash command once the files are copied",
                    action="store_false")
group.add_argument("-q", "--quiet", help="decrease output verbosity to minimal amount",
                   action="store_true")
group.add_argument("-v", "--verbose", help="Increase output verbosity of lcg-cp (-v) or srm (-debug) commands",
                   action="store_true")
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument("-y", "--texlive_year", help="The texlive distribution year",
                    default="2014")
args = parser.parse_args()
if(args.verbose):
    print 'Number of arguments:', len(sys.argv), 'arguments.'
    print 'Argument List:', str(sys.argv)
    print "Argument ", args, "\n"
# Global flags derived from the parsed arguments.
QUIET = args.quiet
VERBOSE = args.verbose
DOHASH = args.nohash
TEXLIVE_YEAR = args.texlive_year
# Install locations, filled in by check_linux_folders()/check_osx_folders().
# NOTE(review): "Outer_path" (capital O) is never read by those functions --
# they declare a lowercase "outer_path" global instead; likely a typo.
theme_path = ""
color_path = ""
Outer_path = ""
def check_linux_folders():
    """Set the beamer theme/color/outer install paths for Linux and verify
    they exist; exits the process if any directory is missing."""
    global theme_path
    global color_path
    global outer_path
    theme_path = "/usr/share/texmf/tex/latex/beamer/base/themes/theme/"
    color_path = "/usr/share/texmf/tex/latex/beamer/base/themes/color/"
    outer_path = "/usr/share/texmf/tex/latex/beamer/base/themes/outer/"
    # To check if it is a directory (and it exists) use os.path.isdir
    # To check if something exists (direcotry, file, or otherwise), use os.path.exists
    theme = os.path.isdir(theme_path)
    color = os.path.isdir(color_path)
    outer = os.path.isdir(outer_path)
    if not QUIET: print "Themes exists? " + str(theme)
    if not QUIET: print "Color themes exists? " + str(color)
    if not QUIET: print "Outer themes exists? " + str(outer)
    # Abort on the first missing directory.
    if not theme:
        print "ERROR::The path to the beamer themes ("+str(theme_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not color:
        print "ERROR::The path to the beamer colors ("+str(color_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
    if not outer:
        print "ERROR::The path to the beamer outer themes ("+str(outer_path)+") does not exist."
        print "Cannot continue."
        sys.exit()
def check_osx_folders():
global theme_path
global color_path
global outer_path
theme_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/theme/"
color_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/color/"
outer_path = "/usr/local/texlive/"+TEXLIVE_YEAR+"/texmf-dist/tex/latex/beamer/themes/outer/"
theme = os.path.isdir(theme_path)
color = os.path.isdir(color_path)
outer = os.path.isdir(outer_path)
if not QUIET: print "Themes exists? " + str(theme)
if not QUIET: print "Color themes exists? " + str(color)
if not QUIET: print "Outer themes exists? " + str(outer)
if not theme:
print "ERROR::The path to the beamer themes ("+str(theme_path)+") does not exist."
print "Cannot continue."
sys.exit()
if not color:
print "ERROR::The path to the beamer colors ("+str(color_path)+") does not exist."
print "Cannot continue."
sys.exit()
if not outer:
print "ERROR::The path to the beamer outer themes ("+str(outer_path)+") does not exist."
print "Cannot continue."
sys.exit()
def privledge_check():
user = getpass.getuser()
if not QUIET: print "User = " + str(user)
if user != 'root':
print "Sorry, you are not \"root\" and do not have enough privledges to continue."
sys.exit()
def run_checks():
print "************************************"
print "* Running checks on the system ... *"
print "************************************"
privledge_check()
kernel = platform.system()
OS = ""
flavor = ""
version = ""
if kernel == 'Linux':
OS = "Linux"
flavor = platform.linux_distribution()[0]
version = platform.linux_distribution()[1]
if not QUIET: print str(flavor) + "(" + str(OS) + ")" + str(version)
check_linux_folders()
elif kernel == 'Darwin':
OS = "OSX"
flavor = "Unknown"
version = platform.mac_ver()[0]
if not QUIET: print str(OS) + " " + str(version)
check_osx_folders()
else:
print "ERROR::Unknown OS. Cannot confirm that installation will be successful. Process will not continue."
sys.exit()
print
def copy_set_of_files(dict, folder):
for dst in dict:
if not QUIET: print "Doing folder " + str(dst) + " ... "
for f in range(1,len(dict[dst])):
src = dict[dst][f]
dest = dict[dst][0]
if not QUIET: print "\tCopying " + str(folder) + str(src) + " to " + str(dest) + " ... ",
shutil.copy2(folder+src,dest)
if not QUIET: print "DONE"
def copy_files():
print "**********************************************"
print "* Copying the files to the correct paths ... *"
print "**********************************************"
copyfileBeamerDict = {
'theme' : (theme_path, "beamerthemeTAMU.sty"),
'color' : (color_path, "beamercolorthemetamu.sty", "beamercolorthemetamubox.sty"),
'outer' : (outer_path, "beamerouterthemeshadowTAMU.sty", "beamerouterthemesplittamu.sty", "UniversityLogos/beamerouterthemeTAMULogoBox.png", "ExperimentLogos/beamerouterthemeCMS.png","ExperimentLogos/beamerouterthemeCDF.png","LaboritoryLogos/beamerouterthemeCERN.png","LaboritoryLogos/beamerouterthemeFNAL.png")
}
if VERBOSE and not QUIET:
print "Dictionary"
print "----------"
pprint.pprint(copyfileBeamerDict)
print
copy_set_of_files(copyfileBeamerDict, "Beamer/")
print
def do_tex_hash():
print "***********************"
print "* Running texhash ... *"
print "***********************"
os.system("texhash")
run_checks()
copy_files()
if DOHASH:
do_tex_hash()
| [
"aperloff@physics.tamu.edu"
] | aperloff@physics.tamu.edu |
196ea254deb7d1aad9c213e8325c7620ff1af56d | 22f74928bece8b07b6054d4fb0692c34253a1c50 | /home/migrations/0006_lodging_category.py | 244ad595d979ee3d95564430463333bfb7f7e254 | [] | no_license | kairatGroupd/trip | 0810cae427226ef1f1f3b0224692cfec68c7a060 | 693965f5b77b61e861fb59955f5fb8b802a679d1 | refs/heads/main | 2023-06-17T14:18:23.026358 | 2021-07-12T13:37:41 | 2021-07-12T13:37:41 | 384,876,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | # Generated by Django 3.2.4 on 2021-07-03 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0005_rename_lodging_id_lodgingimage_lodging'),
]
operations = [
migrations.AddField(
model_name='lodging',
name='category',
field=models.CharField(choices=[('H', 'Hotel'), ('Y', 'Yurts'), ('A', 'Apartments'), ('O', 'Other')], max_length=1, null=True),
),
]
| [
"isma.kairat101@gmai.com"
] | isma.kairat101@gmai.com |
8e5c8e1e4d580f13690e76e7f786ec1a715f6eac | 69d2a8cacc4f6f615ed4d3ec849c5726547a6904 | /arrowkey.py | 5b29c76bbbf1cae342a3efef7c47753bbd7a345c | [] | no_license | MrBrokenHeart666/Arrowkey | 427e12e61b46c6cd4c7b9c8603453a6445c0cfea | cb06b543304c9083821ae9449d356b99042dd37f | refs/heads/master | 2022-11-18T18:46:55.762101 | 2020-07-16T17:01:22 | 2020-07-16T17:01:22 | 280,210,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | import os
from time import sleep
from getpass import getpass
while True :
os.system("clear")
print("+========================!!========================+")
print("Tools By MR.BR0K3NH34RT And u0_a117")
print("+========================!!========================+")
os.system("figlet Arrow Key")
print("+========================!!========================+")
print("""
1).Start Activation Of Arrow Key
2).About This Tools
3).Quit
""")
print("+========================!!========================+")
code = raw_input("Please Choose : ")
if code == "1":
print("[!]Warning[!]")
print("[!]Install At Your On Risk[!]")
os.mkdir('/data/data/com.termux/files/home/.termux')
key = "extra-keys = [['ESC','/','-','HOME','UP','END','PGUP'],['TAB','CTRL','ALT','LEFT','DOWN','RIGHT','PGDN']]"
kontol = open('/data/data/com.termux/files/home/.termux/termux.properties','w')
kontol.write(key)
kontol.close()
sleep(1)
os.system('termux-reload-settings')
print("Activation Of Arrow Key Successed!")
elif code == "2":
print("+========================!!========================+")
print("About This Tools")
print ("This Tools Created By MR.BR0K3NH34RT And U0_A117")
print("+========================!!========================+")
print("""
This Tools Make More Easier For Programmer Or Hacker To Do Some pentesting Or Tools
This Tools Will Add Arrow Key On Your Terminal
""")
print("Do You Still Want To Use This Tools?")
aku = raw_input(" [ Y/N ] : ")
if aku == "y":
print("Restarting Tools")
os.system("python2 arrowkey.py")
if aku == "n":
print("Bye Dude :)")
print("Have A Nice Day :)")
break
elif code == "3":
print("Bye Dude :)")
print("Have A Nice Day :)")
print("")
break
else:
print("Error 404(Not Found)")
| [
"noreply@github.com"
] | MrBrokenHeart666.noreply@github.com |
bc01b3a5e1cd3d723df3fc6d9c258a103d384d9f | a0f9579bbd51e5c8f1e0eb4e09cab42c62016b46 | /Max/app.py | 51ea177b0966b8ed1c2081d19d8fe41b165c9344 | [] | no_license | max-farver/March-Madness-Machine-Learning | 7d4de31f4041b333537cf9aa5acd31fec8f12c15 | aaf46520631343ef1590b319335fcc26b3286082 | refs/heads/master | 2021-07-17T12:17:48.784416 | 2020-09-14T13:41:43 | 2020-09-14T13:41:43 | 206,874,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from graphs import score_by_game_dur, win_by_behind_ball_diff, \
win_by_defensive_half, score_by_on_ground, score_by_slow_speed
external_stylesheets = ['https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Rocket League Analysis'),
# html.Div(children='''
# Dash: A web application framework for Python.
# '''),
dcc.Tabs(id="tabs", children=[
dcc.Tab(label='Score by Game Duration', children=[
score_by_game_dur
]),
dcc.Tab(label='Outcome by Difference of Time Spent Behind Ball', children=[
win_by_behind_ball_diff
]),
dcc.Tab(label='Score by Time Spent in Own Defensive Half', children=[
win_by_defensive_half
]),
dcc.Tab(label='Score by Time Spent on The Ground', children=[
score_by_on_ground
]),
dcc.Tab(label='Score by Time Spent Traveling at a Slow Speed', children=[
score_by_slow_speed
])
])
])
if __name__ == '__main__':
app.run_server(debug=True) | [
"mfarver99@gmail.com"
] | mfarver99@gmail.com |
f93b91fea4893a46b91d447536d0e2524f4ac946 | db9330ee475181ddbe4166ed331892043617086c | /qyer/get_city.py | 1b2ef5b711a2535a712134e3fbb547a702936115 | [] | no_license | Thorntan/common_scripts | a0693f1c7a3edde7a86f3802bebae5c5ef2d9119 | 37c7cadb07218e115e718bfa7b579111547e150c | refs/heads/master | 2020-12-25T14:38:28.174528 | 2016-08-09T07:34:44 | 2016-08-09T07:34:44 | 65,271,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # coding=utf-8
'''
@author: yanlihua
@date: 2015-10-27
@desc: 穷游单个国家城市URL的抓取
@update: 2015-10-27
'''
import sys
from util.Browser import MechanizeCrawler as MC
import re
from lxml import etree
from lxml import html
import codecs
import db_add
reload(sys)
sys.setdefaultencoding('utf-8')
def insert_db(name,name_en,url):
sql="replace into city(city_name,city_name_en,city_url) values (\'%s\',\'%s\',\'%s\')" % (name,name_en,url)
db_add.ExecuteSQL(sql)
def get_cities(country_name_en):
page_num = 1
is_next = 1
while (is_next == 1):
url = "http://place.qyer.com/"+country_name_en+"/citylist-0-0-" + str(page_num)
mc = MC()
page = mc.req('get',url,html_flag=True)
content = page.decode('utf-8')
tree = html.fromstring(content)
is_next = len(tree.find_class('ui_page_next'))
num = len(tree.xpath('//*[@class="plcCitylist"]/li'))
for i in range(1,num+1):
city_name = tree.xpath('//*[@class="plcCitylist"]/li[%s]/h3/a/text()' % i)[0].strip().encode('utf-8')
city_name_en = tree.xpath('//*[@class="plcCitylist"]/li[%s]/h3/a/span/text()' % i)[0].strip().encode('utf-8')
city_url = tree.xpath('//*[@class="plcCitylist"]/li[%s]/h3/a/@href' % i)[0].strip().encode('utf-8')
print city_name,city_name_en,city_url
#insert_db(city_name,city_name_en,city_url)
page_num += 1
if __name__ in '__main__':
country = 'australia'
country = 'fiji'
country_name_en = 'spain'
get_cities(country_name_en)
| [
"yanlihua@mioji.com"
] | yanlihua@mioji.com |
fef01d0dcc16558fefd00563b352cdb5ae4ac5b0 | edbb81518bf2f3d91b1f7c7af0127412aa9eba8c | /.DJvenv/lib/python3.5/imp.py | 7223ddabd5c5c3f6eb103633bce8640e39b937d4 | [] | no_license | 2357gi/mfdjango | dd7f78ff44c21e5ea5af48a121d2acdc02fa90b9 | efda5dcfad9d3d4a528770382ad977192c3dc2f6 | refs/heads/master | 2020-03-25T22:31:27.641096 | 2018-08-13T05:41:48 | 2018-08-13T05:41:48 | 144,227,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | /Users/oogikento/.pyenv/versions/3.5.1/lib/python3.5/imp.py | [
"oogi@torico-tokyo.com"
] | oogi@torico-tokyo.com |
286ea0d6086347e250089f5f817800bad2779b48 | ec3a1e3ee22d6100ed079a5d649b06d309cff889 | /points.py | bbe7c09834452cec77d089df1a2ab867039705ac | [] | no_license | roving99/robert2.0 | 1cf32fff75f1d8f853fb9ee742556a9bc005005d | b095deb0c50d8b64419a49d7f8aaa015fe0ea833 | refs/heads/master | 2020-03-09T15:12:01.866150 | 2018-04-10T01:18:03 | 2018-04-10T01:18:03 | 128,853,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,669 | py | import math
class Point(object):
"""
The class Point represents a 2D point
Class attributes: points
Instance attributes: x
y
"""
def __init__(self):
self.x = 0
self.y = 0
# Point.points.append(self)
def __init__(self, x=0, y=0):
self.x = x
self.y = y
# Point.points.append(self)
def __str__(self):
return '(%g, %g)' % (self.x, self.y)
# Special names methods. . .
# With this method defined, we can use + to add two point objects
# as in p1 + p2 which is equivalent to p1.__add__(p2)
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
# With this method defined, two point objects can be compared with >, <, and ==.
def __cmp__(self, other):
# compare them using the x values first
if self.x > other.x: return 1
if self.x < other.x: return -1
# x values are the same... check y values
if self.y > other.y: return 1
if self.y < other.y: return -1
# y values are the same too. . . it's a tie
return 0
# Other general methods
def distance_from_origin(self):
return math.sqrt(self.x * self.x + self.y * self.y)
def distance(self, other):
dx = math.fabs(self.x - other.x)
dy = math.fabs(self.y - other.y)
return math.sqrt(dx * dx + dy * dy)
def distance2(self, other):
dx = math.fabs(self.x - other.x)
dy = math.fabs(self.y - other.y)
return (dx * dx + dy * dy)
def isIn1stQuad(self):
return (self.x > 0) and (self.y > 0)
def asPair(self):
return (self.x, self.y)
class Cloud(object):
def __init__(self, c=None):
# ingest 'old style' LIDAR cloud {angle:(x,y), angle:(x,y),...}
self.set = []
if c:
for p in c:
self.set.append(Point(c[p][0], c[p][1]))
def __str__(self):
s = "[ "
for p in self.set:
# s = s+"("+str(p.x)+', '+str(p.y)+"), "
s = s+str(p)+', '
s = s+"]"
return str(s)
def append(self, p):
self.set.append(p)
def pointsWithin(self, pos, radius):
#returns cloud of points within radius of pos.
result = Cloud()
centre = Point(pos[0], pos[1])
d2 = radius*radius # save on a sqrt operation.
for p in self.set:
if centre.distance2(p)<d2:
result.append(p)
return result
def xs(self): # list of all x values
result = []
for p in self.set:
result.append(p.x)
return result
def ys(self): # list of all y values
result = []
for p in self.set:
result.append(p.y)
return result
def mean(self):
n = len(self.set)
if n==0:
return Point()
else:
return Point(x=sum(self.xs())/n, y=sum(self.ys())/n)
def leastSquare(self):
'''
determine line (form y = mx+b) best fitting scatter data x,y
'''
n = len(self.set)
if n==0:
return None, None
meanX = sum(self.xs())/n
meanY = sum(self.ys())/n
top = 0.0
bottom = 0.0
for p in self.set:
top = top+(p.x-meanX)*(p.y-meanY)
bottom = bottom + (p.x-meanX)**2
if abs(bottom)<0.0000001:
bottom = 0.0000001
m = top/bottom
b = meanY - m*meanX
return m, b
if __name__=="__main__":
import graph
import pygame
import neato
cloud = [
Point(3,2),
Point(2,8),
Point(7,5),
]
test= {0: (1167, 256), 1: (1162, 272), 2: (1156, 265), 3: (1152, 268), 4: (1147, 275), 5: (1144, 265), 6: (1141, 261), 7: (1138, 274), 8: (1135, 268), 9: (1132, 267), 10: (1130, 271), 11: (1129, 281), 12: (1128, 274), 13: (1127, 279), 14: (1127, 275), 15: (1128, 275), 16: (1128, 287), 17: (1128, 275), 18: (1129, 277), 19: (1129, 280), 20: (1132, 279), 21: (1135, 267), 22: (1137, 274), 23: (1140, 264), 24: (1142, 271), 25: (1147, 256), 26: (1152, 261), 27: (1156, 257), 28: (1161, 260), 29: (1167, 257), 30: (1171, 261), 31: (1179, 255), 32: (1184, 253), 33: (1191, 257), 34: (1199, 251), 35: (1207, 250), 36: (1214, 243), 37: (1225, 244), 38: (1233, 237), 39: (1244, 236), 40: (1256, 233), 41: (1267, 225), 42: (1278, 222), 43: (1291, 199), 44: (1304, 191), 45: (1318, 198), 46: (1335, 180), 47: (1351, 193), 48: (1368, 193), 49: (1385, 180), 50: (1403, 158), 51: (1422, 143), 52: (1440, 164), 53: (1459, 152), 54: (1484, 153), 55: (1506, 143), 56: (1529, 128), 57: (1556, 145), 58: (1585, 121), 59: (1613, 123), 60: (1641, 111), 61: (1674, 114), 62: (1707, 123), 63: (1727, 117), 64: (1695, 124), 65: (1669, 123), 66: (1646, 133), 67: (1623, 130), 68: (1599, 131), 69: (1582, 156), 70: (1557, 143), 71: (1540, 166), 72: (1522, 175), 73: (1504, 146), 74: (1488, 164), 75: (1471, 156), 76: (1454, 166), 77: (1438, 169), 78: (1461, 63), 79: (1520, 57), 80: (1589, 61), 81: (1659, 49), 82: (1735, 47), 83: (1817, 30), 84: (1915, 16), 85: (1825, 55), 86: (2144, 12), 87: (2263, 8), 90: (2083, 9), 91: (2059, 15), 93: (2246, 13), 95: (4509, 9), 96: (4432, 12), 97: (4470, 12), 105: (4197, 14), 106: (4045, 9), 116: (4707, 8), 133: (4249, 17), 134: (4287, 15), 135: (4388, 11), 136: (4459, 13), 139: (1823, 96), 140: (1813, 111), 141: (1822, 85), 142: (2387, 37), 143: (2316, 36), 144: (2265, 33), 145: (2217, 45), 146: (2173, 62), 147: (2120, 52), 148: (2082, 54), 149: (2045, 61), 150: (2008, 59), 151: (1966, 64), 152: (1935, 85), 153: (1981, 72), 154: (2017, 71), 155: (2076, 67), 156: (2088, 37), 
157: (2147, 12), 158: (2196, 13), 159: (2260, 9), 160: (2308, 7), 161: (2403, 12), 162: (2467, 17), 163: (2591, 10), 165: (2786, 27), 166: (2821, 8), 190: (3346, 16), 191: (3344, 19), 218: (4825, 13), 219: (4624, 6), 220: (4351, 17), 221: (4151, 8), 222: (4108, 11), 223: (3965, 12), 224: (3809, 13), 225: (3675, 12), 226: (3602, 19), 227: (3480, 22), 228: (3346, 18), 229: (3250, 24), 230: (3189, 28), 231: (3107, 27), 232: (2990, 33), 233: (2940, 39), 234: (2881, 32), 235: (2818, 43), 236: (2761, 33), 237: (2698, 46), 238: (2633, 47), 239: (2581, 54), 240: (2532, 46), 241: (2502, 55), 242: (2453, 53), 243: (2451, 56), 244: (2514, 63), 245: (2580, 47), 246: (2651, 41), 247: (2707, 35), 248: (2776, 38), 249: (2842, 55), 250: (2830, 53), 251: (2789, 51), 252: (2767, 55), 253: (2741, 67), 254: (2697, 70), 255: (2676, 50), 256: (2645, 76), 257: (2621, 57), 258: (2589, 63), 259: (2584, 77), 260: (2553, 79), 261: (2531, 80), 262: (2503, 65), 263: (2491, 93), 264: (2470, 43), 265: (2456, 88), 266: (2445, 84), 267: (2348, 97), 268: (2344, 92), 269: (2306, 105), 270: (2309, 83), 271: (2312, 92), 272: (2302, 83), 273: (2304, 83), 274: (2270, 97), 275: (2275, 94), 276: (2261, 97), 277: (2241, 113), 278: (2245, 122), 279: (2250, 111), 280: (2245, 99), 281: (2225, 137), 282: (2241, 134), 283: (2242, 107), 284: (2236, 107), 285: (2241, 112), 286: (2244, 97), 287: (2238, 102), 288: (2246, 108), 289: (2251, 120), 290: (2262, 105), 291: (2259, 103), 292: (2261, 106), 293: (2265, 100), 294: (2276, 93), 295: (2286, 93), 296: (2299, 99), 297: (2303, 104), 298: (2320, 92), 299: (2328, 78), 300: (2340, 91), 301: (2353, 92), 302: (2373, 83), 303: (2381, 81), 304: (2397, 75), 305: (2411, 81), 306: (2438, 72), 307: (2454, 76), 308: (2481, 77), 309: (2495, 75), 310: (2521, 77), 311: (2530, 74), 312: (2580, 60), 313: (2602, 67), 314: (2610, 56), 315: (4380, 17), 316: (4187, 20), 317: (4301, 16), 318: (4245, 16), 319: (4242, 15), 320: (4214, 21), 321: (4102, 23), 322: (3983, 12), 323: (4086, 
27), 324: (4157, 16), 325: (4161, 17), 326: (1726, 58), 327: (1688, 119), 328: (1654, 113), 329: (1624, 126), 330: (1597, 136), 331: (1568, 127), 332: (1543, 140), 333: (1521, 146), 334: (1498, 149), 335: (1472, 157), 336: (1451, 158), 337: (1433, 163), 338: (1412, 167), 339: (1397, 168), 340: (1377, 204), 341: (1362, 174), 343: (711, 66), 344: (693, 179), 345: (683, 212), 346: (674, 201), 347: (670, 278), 348: (666, 334), 349: (667, 337), 350: (669, 289), 351: (676, 220), 352: (687, 154), 353: (698, 79), 354: (719, 64), 355: (738, 19), 356: (1191, 249), 357: (1186, 238), 358: (1180, 263), 359: (1172, 257)}
testCloud = neato.toCloud(neato.prune(test))
for pt in cloud:
print pt
print cloud[0], ' +', cloud[1], ' =', cloud[0]+cloud[1]
print cloud[0], ' ==', cloud[1], ' ?', cloud[0]==cloud[1]
print cloud[0], ' >', cloud[1], ' ?', cloud[0]>cloud[1]
print cloud[0], ' <', cloud[1], ' ?', cloud[0]<cloud[1]
print cloud[2], ' ==', cloud[2], ' ?', cloud[2]==cloud[2]
print 'length of', cloud[0], ' =', cloud[0].distance_from_origin()
print 'length of', cloud[1], ' =', cloud[1].distance_from_origin()
myCloud = Cloud(testCloud)
print 'TEST--------------------------'
print test
print 'CLOUD-------------------------'
print myCloud
print graph.WHITE
selection = Cloud() # points selected using mouse.
pygame.init()
size = (800,800)
screen = pygame.display.set_mode(size)
pygame.display.set_caption('Points')
done = False
clock = pygame.time.Clock()
m, b = None, None
displayZoom = 0.1
myGraph = graph.Graph((800,800), origin=(400,400), scale=displayZoom)
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
done = True
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
print myGraph.mouseToGraph(pos)
selection = myCloud.pointsWithin(myGraph.mouseToGraph(pos), 500)
m,b = selection.leastSquare()
screen.fill(graph.RED)
myGraph.draw()
for p in myCloud.set:
myGraph.draw_circle(graph.WHITE, p.asPair(), 1)
for p in selection.set:
myGraph.draw_circle(graph.RED, p.asPair(), 1)
myGraph.draw_circle(graph.PURPLE, selection.mean().asPair(), 2)
if m:
myGraph.draw_line_mb(graph.GREEN, m, b, 1)
screen.blit(myGraph.surface, (0,0))
pygame.display.flip()
clock.tick(10) # limit to 10 fps (plenty fast enough)
pygame.quit()
| [
"noreply@github.com"
] | roving99.noreply@github.com |
767c331b4931318068b200f627e229b4cfa8bb86 | 32b8a26d7b6bcd34f109910fac651520aec8f23f | /token_manager.py | 4d6168274dbd2d5c48457a7844040026c3afc036 | [] | no_license | HU-MA/nextbox | 52571ebd5c3a4a72eefb3296f25d09b982ee6f78 | 82a81512b8810de60a1f45834da5fa8523911f03 | refs/heads/master | 2023-04-09T17:55:35.163917 | 2021-04-21T15:02:42 | 2021-04-21T15:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py |
import os
import yaml
import socket
import sys
import re
hostname = socket.gethostname()
user = os.environ["USER"]
# read registry
with open("registry.yaml") as fd:
registry = yaml.safe_load(fd)
# check legal user
if registry["user"] != user:
print("local user does not match the one in the registry!")
sys.exit(1)
# check legal host
if registry["host"] != hostname:
print("hostname does not match the one in the registry!")
sys.exit(1)
# open token file and import all tokens
with open(registry["token_file"]) as fd:
TOKENS = [tok.strip() for tok in fd.readlines()]
# check for passed serial
if len(sys.argv) != 2:
print("no serial number provided")
print("Usage: python3 token_manager.py <serial>")
sys.exit(1)
# check for valid serial
serial = sys.argv[1]
if not re.match("^[0-9]{8}$", serial):
print("provided invalid serial number")
sys.exit(1)
# get token
token_idx = len(registry["serials"]) + registry["offset"]
token = TOKENS[token_idx]
# create registry entry
new_entry = {
"serial": serial,
"token": token,
}
# create nextbox.conf
with open("nextbox.conf.tmpl") as fd:
nextbox_conf = fd.read() \
.replace("%%TOKEN%%", token) \
.replace("%%SERIAL%%", serial)
# write nextbox.conf
with open("nextbox.conf", "w") as fd:
fd.write(nextbox_conf)
# update and save registry
registry["serials"].append(new_entry)
with open("registry.yaml", "w") as fd:
yaml.dump(registry, fd)
print("SUCCESS, created nextbox.conf and updated registry")
sys.exit(0)
| [
"coder@safemailbox.de"
] | coder@safemailbox.de |
ea39051bc7569500c51c1e9a6d1f2602805a4eba | 29e381cbad0d8e97a236f78179869eab3d0510df | /practice/Day08/untitled/1-窗口控制/2-控制窗体大小和位置.py | 24366e835ab709201c17412d89d0d0714a96a418 | [] | no_license | SissiCheng/Python_Learning | 843dbc59678737712de3e3274eeb4f30e75090c2 | 57c7ef5f44f183fc09b50fb4b181d2947b739e15 | refs/heads/master | 2020-12-02T22:11:30.818020 | 2018-02-28T13:55:07 | 2018-02-28T13:55:07 | 96,094,091 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | import win32gui
| [
"18668352401@163.com"
] | 18668352401@163.com |
9f7324363c3eac6287a746b66951f2fd28802a31 | b3df40dd032e048fc13d85636257278d1531e482 | /SWEA/SWEA_wordWhere.py | 778a543c021123c157cb894b38bb25692335dfe6 | [] | no_license | humbleYoon/algorithm | e932f296095769cf73275d48ba4e027247c3e594 | 8d63f3e64f82daabc0b8eb2384dc02ab40d88356 | refs/heads/master | 2022-11-06T13:05:01.475764 | 2020-06-21T17:19:34 | 2020-06-21T17:19:34 | 273,423,506 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | T = int(input())
for test_case in range(T):
N, K = map(int, input().split())
info_list = []
result = 0
for i in range(N):
row_list = list(map(int, input().split()))
info_list.append(row_list)
for i in range(N):
for j in range(N):
if info_list[i][j] == 1:
idx = j+1
length = 1
if j-1 == -1:
while idx < N and info_list[i][idx] == 1:
idx += 1
length += 1
else:
if info_list[i][j-1] == 0:
while idx < N and info_list[i][idx] == 1:
idx += 1
length += 1
if length == K:
result += 1
idx = i+1
length = 1
if i-1 == -1:
while idx < N and info_list[idx][j] == 1:
idx += 1
length += 1
else:
if info_list[i-1][j] == 0:
while idx < N and info_list[idx][j] == 1:
idx += 1
length += 1
if length == K:
result += 1
print("#{0} {1}".format(test_case+1, result)) | [
"rladbswo1124@gmail.com"
] | rladbswo1124@gmail.com |
a27d8154ba2ec12b8f6f1ad4190a60de6f241358 | fe4c65df15cefe67d4d2c3c50bde72d0d2126e70 | /Python/70.climbing-stairs.py | 9f6080aa63bb054380340ce44aa4630ff16ad5e9 | [] | no_license | sklanier/Leetcode | 28b20123c18392ac710f794f38ea13ba51979b96 | a5762ffa6a698042ed19a0a2f538bbde6a01d2d1 | refs/heads/master | 2023-06-09T20:41:29.613890 | 2021-06-29T17:57:57 | 2021-06-29T17:57:57 | 373,676,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | #
# @lc app=leetcode id=70 lang=python3
#
# [70] Climbing Stairs
#
# @lc code=start
class Solution:
def climbStairs(self, n):
a, b = 1, 1
for i in range(n):
a, b = b, a + b
return a
# @lc code=end
| [
"stevekentlanier@gmail.com"
] | stevekentlanier@gmail.com |
fb708659d8576b28acdb88c0439ca493e36c5884 | 30c524146ac7c240b3f69a856a12f9d971e2f294 | /setup.py | a7138c22975f00b0e0c89fc5a9121d3aa768c383 | [
"MIT"
] | permissive | undercertainty/ipython_magic_sqlalchemy_schemadisplay | 7da1400b4b9cff520b3e185345c204f14ccb512d | bc22060f3125736eecf2cc4d7972eca9715fc0c3 | refs/heads/master | 2021-10-10T07:00:04.925288 | 2019-01-07T23:01:31 | 2019-01-07T23:01:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from setuptools import setup
setup(name='schemadisplay-magic',
packages=['schemadisplay_magic'],
install_requires=['ipython-sql', 'sqlalchemy_schemadisplay', 'graphviz'],
dependency_links=['git+https://github.com/fschulze/sqlalchemy_schemadisplay.git']
) | [
"tony.hirst@gmail.com"
] | tony.hirst@gmail.com |
f30723fa2b4402bc88c5b0c9fd11a11c4eeef26b | 4747abf60b17c462e29d28d3f370398b7932a790 | /mock_solution.py | 54b504e859c307afc7c2cabce7369cfb0b7bae6b | [] | no_license | rachit-mishra/personal-training | 1bb288255e2ad3028f3d2b63ea4018687ab44ed3 | 8e728d0fee05138ee6cc1639f79a90ad1b97c42c | refs/heads/master | 2021-02-26T13:21:12.126525 | 2020-03-06T22:31:18 | 2020-03-06T22:31:18 | 245,528,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py |
########### Python Form Recognizer Async Layout #############
"""
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/contoso-allinone.jpg
key:a527f116932642ae9cc5b2940a61cedb
endpoint: https://zsbttrackmeeting.cognitiveservices.azure.com/
"""
import json
import time
from requests import get, post
import urllib.request
# Endpoint URL
endpoint = r"https://zsbttrackmeeting.cognitiveservices.azure.com/"
apim_key = "a527f116932642ae9cc5b2940a61cedb"
post_url = endpoint + "/formrecognizer/v2.0-preview/prebuilt/receipt/analyze"
source = r"https://raw.githubusercontent.com/Azure-Samples/cognitive-services-REST-api-samples/master/curl/form-recognizer/contoso-allinone.jpg"
print(source)
headers = {
# Request headers
'Content-Type': 'image/jpg',
'Ocp-Apim-Subscription-Key': "a527f116932642ae9cc5b2940a61cedb",
}
print("Printing headers.......{}".format(headers))
params = {
"includeTextDetails": True
}
with urllib.request.urlopen(source) as url:
data_bytes=url.read()
print(data_bytes)
# with open('temp.jpg', 'wb') as f:
# f.write(url.read())
# file = urllib.request.urlopen(source)
# data_bytes = file.read()
# print(data_bytes)
try:
resp = post(url = post_url, data = data_bytes, headers = headers, params = params)
print("Printing the response here.........................................................")
print(resp.status_code)
if resp.status_code != 202:
print("POST analyze failed:\n%s" % resp.text)
quit()
print("POST analyze succeeded:\n%s" % resp.headers)
get_url = resp.headers["operation-location"]
except Exception as e:
print("POST analyze failed:\n%s" % str(e))
quit()
def get_analyzed_results():
n_tries = 5
n_try = 0
wait_sec = 6
while n_try < n_tries:
try:
resp = get(url = get_url, headers = {"Ocp-Apim-Subscription-Key": apim_key})
resp_json = json.loads(resp.text)
print(resp_json)
if resp.status_code != 200:
print("GET Layout results failed:\n%s" % resp_json)
quit()
status = resp_json["status"]
if status == "succeeded":
print("Layout Analysis succeeded:\n%s" % resp_json)
quit()
if status == "failed":
print("Analysis failed:\n%s" % resp_json)
quit()
# Analysis still running. Wait and retry.
time.sleep(wait_sec)
n_try += 1
except Exception as e:
msg = "GET analyze results failed:\n%s" % str(e)
print(msg)
quit()
return 1
| [
"noreply@github.com"
] | rachit-mishra.noreply@github.com |
0e0c792492d3e94d7a531b61be9d5c77c9a54835 | d9576f90b1a885cd803076d08731306dd90fb623 | /yatsm_master/yatsm/errors.py | a2c27eb6cef1f9612d57c35a9588e577896d7289 | [
"MIT"
] | permissive | sguo1129/robust_regression_python | 836a1c38ec0868d5d33f3668d686cf040e8ee34e | e2d3ea25264d27c8d44bfa051f694b4c0c0ef7a4 | refs/heads/master | 2022-12-06T16:45:00.581762 | 2017-03-08T19:50:17 | 2017-03-08T19:50:17 | 84,358,355 | 0 | 1 | null | 2022-11-21T14:09:23 | 2017-03-08T19:30:56 | Python | UTF-8 | Python | false | false | 568 | py | class TSLengthException(Exception):
""" Exception stating timeseries does not contain enough observations
"""
pass
class TrainingDataException(Exception):
""" Custom exception for errors with training data """
pass
class AlgorithmNotFoundException(Exception):
""" Custom exception for algorithm config files without handlers """
pass
class InvalidConfigurationException(Exception):
pass
class PipelineConfigurationError(TypeError):
""" Exception for invalid ``require``/``output`` specification in pipeline
"""
pass
| [
"SGUO@USGS.GOV"
] | SGUO@USGS.GOV |
1b3d280c7403941d0bf096038fcd3c6fb955bb16 | c588305899ff4bc1d24c2bc213edce1c16621113 | /21/21_1.py | 02b5535cb8428832fa0ea9383dad49ac806703b3 | [] | no_license | nemesmarci/Advent-of-Code-2015 | fa2953916e67d6ad5b3218de1bc7418ff942ab6a | 53db8d0e0140f94a80d307b3cec3e065a235ba53 | refs/heads/master | 2021-12-31T14:08:52.640576 | 2020-01-10T18:13:59 | 2021-12-29T19:35:09 | 160,928,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | from common import find_cost
print(find_cost())
| [
"nemes@sch.bme.hu"
] | nemes@sch.bme.hu |
5af412d9d09e2346e325dfcceae3c46451e333af | 69b7fa46be90010a03a7ba4d8d4e0a27f171ffc1 | /CSC 138/Socket Assignment 1/udpserver.py | ab80acc99342508e62f468fbe963c8250058cea3 | [] | no_license | breeanageorge/college-classes | cd952e2d2a26560e4cba613ce1d40f4dee94d000 | e8fe5bb5971e7538c9d3ed4ce12d9b20861605b7 | refs/heads/master | 2020-05-18T10:28:46.232649 | 2019-05-01T04:09:46 | 2019-05-01T04:09:46 | 184,353,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | from socket import*
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', serverPort))
print("The server is ready to receive")
while True:
message, clientAddress = serverSocket.recvfrom(2048)
modifiedMessage = message.decode().upper()
serverSocket.sendto(modifiedMessage.encode(), clientAddress) | [
"37132204+breeanageorge@users.noreply.github.com"
] | 37132204+breeanageorge@users.noreply.github.com |
f12ecdec195d21b07ddb86e45226d52b6dbaf079 | a5c2f4ada2fb4436784a785a5d598546d3b3284c | /Main/migrations/0001_initial.py | 305f99beddc1606764e5d5472f2e0f219b5ffacf | [] | no_license | sakibovi123/chat_applicationv1.0.1 | 1c5d25c1229434b4c6019fcf4dbabf53324d90df | 7b5db530e22743959df215347ff1e644cbbfb4e0 | refs/heads/master | 2023-07-13T22:22:02.295141 | 2021-08-28T07:35:27 | 2021-08-28T07:35:27 | 396,916,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # Generated by Django 3.2.6 on 2021-08-16 11:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ChatRoom',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('message', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"sakibovi123@gmail.com"
] | sakibovi123@gmail.com |
1988f3bfa396617797d1effd273ed01d83a05ec9 | 92acb2bdfcdb594a7f98b24093f4711879e956ca | /dvaapp/admin.py | 7be6bf0aa9a7a506025392cfac7e62ea6530b6cf | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | cynwpu/DeepVideoAnalytics | e1f0b2e00a2671014bdcae99bf11c180bf35a30e | c95913a2967d6d17e71bb1b703f99c00c483bcdc | refs/heads/master | 2021-05-05T15:04:50.650488 | 2017-09-10T20:01:31 | 2017-09-10T20:01:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | from django.contrib import admin
from .models import Video, Frame, TEvent, IndexEntries, QueryResults, DVAPQL, VDNServer,\
LOPQCodes, Region, Tube, Detector, Segment, DeletedVideo, \
VideoLabel, FrameLabel, RegionLabel, TubeLabel, SegmentLabel, Label, ManagementAction, \
StoredDVAPQL, Analyzer, Indexer, Retriever, SystemState, Worker
@admin.register(SystemState)
class SystemStateAdmin(admin.ModelAdmin):
pass
@admin.register(Worker)
class WorkerAdmin(admin.ModelAdmin):
pass
@admin.register(Label)
class LabelAdmin(admin.ModelAdmin):
pass
@admin.register(VideoLabel)
class VideoLabelAdmin(admin.ModelAdmin):
pass
@admin.register(FrameLabel)
class FrameLabelAdmin(admin.ModelAdmin):
pass
@admin.register(SegmentLabel)
class SegmentLabelAdmin(admin.ModelAdmin):
pass
@admin.register(RegionLabel)
class RegionLabelAdmin(admin.ModelAdmin):
pass
@admin.register(TubeLabel)
class TubeLabelAdmin(admin.ModelAdmin):
pass
@admin.register(Segment)
class SegmentAdmin(admin.ModelAdmin):
pass
@admin.register(Region)
class RegionAdmin(admin.ModelAdmin):
pass
@admin.register(Video)
class VideoAdmin(admin.ModelAdmin):
pass
@admin.register(DeletedVideo)
class DeletedVideoAdmin(admin.ModelAdmin):
pass
@admin.register(QueryResults)
class QueryResultsAdmin(admin.ModelAdmin):
pass
@admin.register(DVAPQL)
class DVAPQLAdmin(admin.ModelAdmin):
pass
@admin.register(Frame)
class FrameAdmin(admin.ModelAdmin):
pass
@admin.register(IndexEntries)
class IndexEntriesAdmin(admin.ModelAdmin):
pass
@admin.register(VDNServer)
class VDNServerAdmin(admin.ModelAdmin):
pass
@admin.register(TEvent)
class TEventAdmin(admin.ModelAdmin):
pass
@admin.register(LOPQCodes)
class LOPQCodesAdmin(admin.ModelAdmin):
pass
@admin.register(Tube)
class TubeAdmin(admin.ModelAdmin):
pass
@admin.register(Detector)
class DetectorAdmin(admin.ModelAdmin):
pass
@admin.register(Analyzer)
class AnalyzerAdmin(admin.ModelAdmin):
pass
@admin.register(Indexer)
class IndexerAdmin(admin.ModelAdmin):
pass
@admin.register(Retriever)
class RetrieverAdmin(admin.ModelAdmin):
pass
@admin.register(ManagementAction)
class ManagementActionAdmin(admin.ModelAdmin):
pass
@admin.register(StoredDVAPQL)
class StoredDVAPQLAdmin(admin.ModelAdmin):
pass
| [
"akshayubhat@gmail.com"
] | akshayubhat@gmail.com |
73f303388182067a9dc0400a9f3b73c4a3e19c4d | fc6b7f72bd86d0707618802aa328cce5d22301f2 | /Physics-3s-2model/task1/F_P_L_M.py | 8f6b36c4acdde02c8ab4cac10293650aa6c6bec6 | [] | no_license | JoJukov/PythonStuff | ff7c942dfc1867966cc6571328914dbd086c84f5 | 33c7ee0242cda67329e705194ea6db561359b715 | refs/heads/master | 2023-05-23T02:49:52.478605 | 2021-06-14T13:50:04 | 2021-06-14T13:50:04 | 370,728,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | from math import sin, cos, radians
def p_l_m(l: float, m: float):
if l == 0 and m == 0:
return lambda x: 1
elif l == 1 and m == 0:
return lambda x: cos(radians(x))
elif l == 1 and m == 1:
return lambda x: -sin(radians(x))
elif l == 2 and m == 0:
return lambda x: 0.5 * (3 * (cos(radians(x)))**2 - 1)
elif l == 2 and m == 1:
return lambda x: -3 * cos(radians(x)) * sin(radians(x))
elif l == 2 and m == 2:
return lambda x: 3 * (sin(radians(x)))**2
elif l == 3 and m == 0:
return lambda x: 0.5 * (5 * (cos(radians(x)))**3 - 3 * cos(radians(x)))
elif l == 3 and m == 1:
return lambda x: -1.5 * (5 * (cos(radians(x)))**2 - 1) * sin(radians(x))
elif l == 3 and m == 2:
return lambda x: 15 * cos(radians(x)) * sin(radians(x))**2
elif l == 3 and m == 3:
return lambda x: -15 * sin(radians(x))**3
elif l == 4 and m == 0:
return lambda x: 0.125 * (35 * cos(radians(x))**4 - 30 * cos(radians(x))**2 + 3)
else:
assert ValueError("BL with such input data isn't exists")
| [
"ivanzhukov2002@mail.ru"
] | ivanzhukov2002@mail.ru |
9d92e70396e788812023e3d33729af63343612b6 | 94f1e31beaa6941844d6966ca654097c2fe12a0d | /ASEN5090/HW5/HW5.py | 0cf52882a4ee895d6392057d44788bee17a0543d | [] | no_license | mfkiwl/RFLab | 834e2881cb770adc4f739248b8c36fc9a8b0f0f2 | cae3af6db7b970246bb9a2b5d733889b1af3c7a4 | refs/heads/master | 2023-01-21T19:25:32.798558 | 2020-12-01T01:38:25 | 2020-12-01T01:38:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,733 | py | from read_ephemeris import parse_rinex
from rinex_utilities import parse_rinex_nav_file, parse_rinex_obs_file
import numpy as np
from matplotlib import pyplot as plt
from readyuma import mean2eccentric, broadcast2pos, read_GPSyuma
import pandas as pd
from hw5_helpers import *
from matplotlib.dates import (YEARLY, DateFormatter,
rrulewrapper, RRuleLocator, drange)
MY_SATT = "G02"
# MY_SATT = "G30"
PRN = int(MY_SATT[1:len(MY_SATT)])
##############################################################################################################
# Part 1
# Starting point: Choose a good PRN from the RINEX obs file to work with for this assignment.
header, almanac_data = parse_rinex_nav_file('brdc2450.20n') # Grab data
# print(almanac_data)
_, obs_data = parse_rinex_obs_file("nist2450.20o") # Grab even more data
base = np.datetime64("2020-09-01T00:00:00") # Weird Time Thing
seconds = (obs_data[MY_SATT].time - base)/1e6 + 172800 # Weird Time Thing
weekComp = np.ones(len(seconds))*2121 # Weird Time Thing
_, sattPos, _, _ = broadcast2posM(almanac_data,np.array([weekComp,seconds.astype(float)]).transpose(),PRN) # Grab Satellite Position Using Revised Broadcast2Pos
rxPos = get_nistECEF() # Grabs ECEF coords of NIST Location
azelrange_rx2satt = pd.DataFrame(azelrange(rxPos, sattPos), columns = ['Azimuth', 'Elevation', 'Range']) # Az/El/Range between sat and rx
# Create a time vector just including the times for which you have an observation for the satellite.
# visibility_idx = (azelrange_rx2satt['Elevation'] > 0) & (azelrange_rx2satt['Elevation'] < 180)
# seconds = seconds[visibility_idx]
# azelrange_rx2satt = azelrange_rx2satt[visibility_idx]
# sattPos = sattPos[visibsility_idx]
# azelrange_rx2satt.to_excel('revised'+MY_SATT+'.xlsx') # Export for sanity
# Compute the expected range for each of these times.
satExpectedRange = np.zeros(len(seconds))
bsv = np.zeros(len(seconds))
relsv = np.zeros(len(seconds))
for i in range(len(seconds)):
satExpectedRange[i], bsv[i], relsv[i] = ExpectedRange(sattPos[i,:], rxPos,seconds[i].astype(float), almanac_data, PRN)
# Compute and plot the difference between the measured C/A code pseudorange and the
# expected range (R in meters) versus time (hours) for Sept 1, 2020. What should it look like?
C1 = obs_data[MY_SATT].signals["L1"].pr
P2 = obs_data[MY_SATT].signals["L2"].pr
dPR0 = C1 - satExpectedRange
# dPR0 = C1[visibility_idx] - satExpectedRange
fig, ax = plt.subplots()
ax.set_title("DPR (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , dPR0, 3, label = "dPR0")
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("DPR (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT1.png' % (PRN, PRN))
##############################################################################################################
# Part 2: Clock error: Write a function to compute the satellite clock correction (meters) based on the a0 and a1 terms in
# the broadcast epheremis. Alternatively, you can add the SV clock correction capability to broadcast_eph2pos and provide bsv as an
# additional output
# Plot the SV clock correction (meters) versus time (hours) for Sept 1, 2020. What should it look like?
fig, ax = plt.subplots()
ax.set_title("Clock Correction (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , bsv, 3)
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Clock Correction (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT2.png' % (PRN, PRN))
dPR1 = C1 - (satExpectedRange - bsv)
fig, ax = plt.subplots()
ax.set_title("DPR (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , dPR1, 3, label = "dPR1-CLOCK")
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Difference in Psuedo Range (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT3.png' % (PRN, PRN))
##############################################################################################################
# Part 3: Relativity: Write a function or augment broadcast_eph2pos to compute the relativistic correction
# (meters) based on the orbital elements in the ephemeris data
dPR1 = C1 - (satExpectedRange - bsv)
dPR2 = C1 - (satExpectedRange - bsv - relsv)
fig, ax = plt.subplots()
plt.scatter((seconds.astype(float) -172800)/3600 , relsv, 3)
# ax.set_xlim(6, 12)
# ax.set_ylim(min(relsv), max(relsv))
ax.set_title("Relativistic Correction (m) PRN%s" % PRN)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Relativistic Correction (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT4.png' % (PRN, PRN))
fig, ax = plt.subplots()
ax.set_title("DPR (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , dPR1, 3, label = "dPR1-CLOCK")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR2, 3, label = "dPR2-RELATIVITY")
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Difference in Psuedo Range (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT5.png' % (PRN, PRN))
##############################################################################################################
# Part 4: . Troposphere: Write a function to compute the simple tropospheric delay model (meters) based on the satellite
# elevation and an assumed zenith delay for the ground station location. For the NIST ground station you may assume the
# zenith value is 2 m.
zd = 2
tropo = zd/np.sin(np.deg2rad(azelrange_rx2satt['Elevation']))
fig, ax = plt.subplots()
plt.scatter((seconds.astype(float) -172800)/3600 , tropo, 3)
# ax.set_xlim(6, 12)
# ax.set_ylim(min(tropo), max(tropo))
ax.set_title("Tropo Correction (m) PRN%s" % PRN)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Tropo Correction (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT6.png' % (PRN, PRN))
# tropo = tropomodel(np.array([weekComp,seconds.astype(float)]).transpose(), zd=2)
# Plot the tropo correction (meters) versus time (hours) for Sept 1, 2020. What should it look like?
dPR1 = C1 - (satExpectedRange - bsv)
dPR2 = C1 - (satExpectedRange - bsv - relsv)
dPR3 = C1 - (satExpectedRange - bsv - relsv + tropo)
# Compute and plot dPR3 = C1 – (R – bsv - relsv + tropo)
fig, ax = plt.subplots()
ax.set_title("DPR (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , dPR1, 3, label = "dPR1-CLOCK")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR2, 3, label = "dPR2-RELATIVITY")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR3, 3, label = "dPR3-TROPO")
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Difference in Psuedo Range (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT7.png' % (PRN, PRN))
##############################################################################################################
# Part 5: Ionosphere: For the ionospheric correction, rather than using a model-based correction (like Klobuchar) we
# will combine GPS pseudorange measurements into an ionosphere free combination.
# Write a function to compute the iono-free combination of the pseudoranges reported in the RINEX obs file and
# also the iono correction based on these two measurements. To allow your function to also be used for L5
# measurements you can pass into it the carrier frequencies as well as the measurements.
# [PRIF, iono] = ionocorr (C1, f1, P2, f2) *high precision people tend to call the iono-free combo P3
L1 = 1575.42e6
L2 = 1227.60e6
PRIF, ionocorr = Ionofree(C1, L1, P2, L2)
fig, ax = plt.subplots()
plt.scatter((seconds.astype(float) -172800)/3600 , ionocorr, 3)
# ax.set_xlim(6, 12)
# ax.set_ylim(min(tropo), max(tropo))
ax.set_title("Iono Correction (m) PRN%s" % PRN)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Iono Correction (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT8.png' % (PRN, PRN))
dPR1 = C1 - (satExpectedRange - bsv)
dPR2 = C1 - (satExpectedRange - bsv - relsv)
dPR3 = C1 - (satExpectedRange - bsv - relsv + tropo)
dPR4 = PRIF - (satExpectedRange - bsv - relsv + tropo)
# Compute and plot dPR3 = C1 – (R – bsv - relsv + tropo)
fig, ax = plt.subplots()
ax.set_title("DPR (m) PRN%s" % PRN)
plt.scatter((seconds.astype(float) -172800)/3600 , dPR1, 3, label = "dPR1-CLOCK")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR2, 3, label = "dPR2-RELATIVITY")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR3, 3, label = "dPR3-TROPO")
plt.scatter((seconds.astype(float) -172800)/3600 , dPR4, 3, label = "dPR4-IONO")
plt.legend(markerscale=2.)
ax.set_xlabel("Time (hours after Midnight)")
ax.set_ylabel("Difference in Psuedo Range (m)")
ax.grid(True)
fig.savefig('Pics\PRN%s\PRN%sPLOT9.png' % (PRN, PRN))
plt.show() | [
"mema0341@colorado.edu"
] | mema0341@colorado.edu |
007e7be7265a787c6257d743cf325b315388b9c7 | c06e303da383c344e4325e7212096962bc0106fe | /registration/migrations/0004_auto_20161123_0251.py | f0c40dcebf240e45eda851b24261382b0ae8975d | [
"MIT"
] | permissive | arpanpathak/college-fest-management | ac3a06c25f44ece4ece90aabf8be3161724c0cfd | 186ffe78deed7ae4904e809412d84883e669b6bf | refs/heads/master | 2022-12-28T20:16:13.198515 | 2020-10-03T21:36:27 | 2020-10-03T21:36:27 | 300,932,483 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-22 21:21
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0003_candidate_timestamp'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='timeStamp',
field=models.DateTimeField(default=datetime.datetime(2016, 11, 23, 2, 51, 8, 32229), editable=False),
),
]
| [
"arpanpathak1996@gmail.com"
] | arpanpathak1996@gmail.com |
6679cb1ffa32655619b43f27ce1f2a4717c943f8 | fa07e9901e83299e2a996c07ff7483e59dd40e90 | /proj2-iia/main.py | 6e4c83c59ff0c78328096059b5669bca39fecaac | [] | no_license | CarolFP1896/Projeto_IIA | 01a5580db5dc8aaf11f9e3c6fa603101c5eea71d | 929bef3c968e6bc05846cbbaa68ddbfe6f560160 | refs/heads/master | 2023-04-27T16:35:15.780282 | 2021-05-08T01:22:54 | 2021-05-08T01:22:54 | 364,360,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | # importa modulos e pacotes necessarios
from dataAnalysis import virus, result, alas, infected
from randomForest import classifierReport, bestParams
import os
# funcao que imprime o menu da analise de dados e retorna os graficos gerados em cada resultado
def analysis():
a = 0
while a != 5:
print ("""
-------------------ANÁLISE DOS DADOS---------------------
1. Resultado dos exames
2. Número de pacientes internados por alas
3. Número de pacientes infectados por variantes do vírus
4. Total de resultados
5. Exit/Quit
---------------------------------------------------------
""")
a = input('Digite a opção desejada: ')
if a=="1":
# se opcao for 1, chama a funcao que mostra plot dos resultados dos exames
os.system('cls' if os.name == 'nt' else 'clear')
result()
elif a=="2":
# se opcao for 2, chama funcao que mostra plot dos pacientes infectados por alas
os.system('cls' if os.name == 'nt' else 'clear')
alas()
elif a=="3":
# se opcao for 3, chama funcao que mostra plot dos pacientes infectados por cada variante do virus
os.system('cls' if os.name == 'nt' else 'clear')
virus()
elif a=="4":
# se for 4, chama a funcao que imprime no terminal o total de resultados de exames
os.system('cls' if os.name == 'nt' else 'clear')
print('\n O total de resultados é: \n', infected())
elif a=="5":
# se opcao for 5, sai do programa
os.system('cls' if os.name == 'nt' else 'clear')
main()
elif a !="":
print('\n Opção inválida! Digite novamente...')
# funcao principal que imprime um menu principal
def main():
a = 0
while a != 5:
print ("""
-------------------- MENU PRINCIPAL ---------------------
1. Análise de dados
2. Relatório de classificação (classifier)
3. Ajuste de hiperparâmetros (best params)
4. Exit/Quit
---------------------------------------------------------
""")
a = input('Digite a opção desejada: ')
if a=="1":
# se opcao for 1, chama a funcao que imprime o menu de analise dos dados
os.system('cls' if os.name == 'nt' else 'clear')
analysis()
elif a=="2":
# se opcao for 2, chama a funcao que imprime no terminal o relatorio de classificao da random forest gerada
os.system('cls' if os.name == 'nt' else 'clear')
classifierReport()
elif a=="3":
# se opcao for 3, chama a funcao que imprime no terminal os resultados dos melhores parametros da random forest gerada
os.system('cls' if os.name == 'nt' else 'clear')
bestParams()
elif a=="4":
# se opcao for 4, sai do programa
os.system('cls' if os.name == 'nt' else 'clear')
exit()
elif a !="":
print('\n Opção inválida! Digite novamente...')
main() | [
"noreply@github.com"
] | CarolFP1896.noreply@github.com |
91258b8a32fba3b8345c1f4e1dca1767557a8d42 | d9b4e7ffa500e4237ffa35b3b74ca6447c6eda25 | /Ejemplos/opticalflow2.py | 5b1be2432e2eccf662507ee634f4814e879fe83d | [
"MIT"
] | permissive | Daniel-hp/Servicio | e21ff171d717500143a2a70eb806d1822b6433d0 | 5436f90cf745431dfd85b37d7848e9bcf6d04fa2 | refs/heads/master | 2021-01-24T10:26:09.395336 | 2018-09-20T21:03:05 | 2018-09-20T21:03:05 | 123,051,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | import cv2
import numpy as np
cap = cv2.VideoCapture("mov.mp4")
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
while(1):
ret, frame2 = cap.read()
next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
cv2.imshow('frame2',rgb)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('opticalfb.png',frame2)
cv2.imwrite('opticalhsv.png',rgb)
prvs = next
cap.release()
cv2.destroyAllWindows() | [
"daniel.hernandez@inediti.com.mx"
] | daniel.hernandez@inediti.com.mx |
e848112ff0d64fd19119144e38b6241dc01354bd | 295b77fcf74065a3942cbca04d476ad8886ab33e | /models/encoders.py | 88399e2f0c006bafe252dddf8cd9de8a0f636498 | [] | no_license | MarcusNerva/spatio_temporal_graph | d999917fd47e82b6620ff7f624c389e479f4dc85 | c391b8dfdf32ac5c0fc92970aea3d22da87f49e6 | refs/heads/master | 2022-11-30T05:04:22.475717 | 2020-08-15T08:24:49 | 2020-08-15T08:24:49 | 282,892,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,728 | py | import torch
import torch.nn as nn
import math
class GCN_layer(nn.Module):
"""
BasicBlock of object branch encoder.
"""
def __init__(self, N, d_model):
super(GCN_layer, self).__init__()
self.N = N
self.d_model = d_model
self.linear = nn.Linear(in_features=d_model, out_features=d_model, bias=False)
self.batchnorm = nn.BatchNorm1d(num_features=N)
self.relu = nn.ReLU(inplace=True)
self.init_paramenters()
def init_paramenters(self):
stdv = 1. / math.sqrt(self.d_model)
self.linear.weight.data.uniform_(-stdv, stdv)
def forward(self, relation_matrix, H_l):
identity = H_l
out = torch.bmm(relation_matrix, H_l)
out = self.linear(out)
out = self.batchnorm(out)
out = identity + out
ret = self.relu(out)
return ret
class GCN(nn.Module):
"""
Encoder of object branch.
"""
def __init__(self, in_feature_size, out_feature_size, N, drop_probability=0.5):
super(GCN, self).__init__()
self.in_feature_size = in_feature_size
self.out_feature_size = out_feature_size
self.N = N
self.d_model = out_feature_size
self.drop_probability = drop_probability
self.linear_weight0 = nn.Linear(in_features=in_feature_size, out_features=out_feature_size, bias=False)
self.gcn_layer0 = GCN_layer(N=self.N, d_model=self.d_model)
self.drop0 = nn.Dropout(p=self.drop_probability, inplace=False)
self.gcn_layer1 = GCN_layer(N=self.N, d_model=self.d_model)
self.drop1 = nn.Dropout(p=self.drop_probability, inplace=False)
self.gcn_layer2 = GCN_layer(N=self.N, d_model=self.d_model)
self.avg_pool = nn.AvgPool1d(kernel_size=5, stride=5)
self.init_paramenters()
def init_paramenters(self):
stdv = 1. / math.sqrt(self.out_feature_size)
self.linear_weight0.weight.data.uniform_(-stdv, stdv)
def forward(self, G_st, F_0):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
diag = torch.sum(G_st, dim=-1, keepdim=True)
diag = torch.eye(self.N).to(torch.float32)[None, :].to(device) * diag
diag = 1. / torch.sqrt(diag)
relation_matrix = torch.bmm(diag, G_st)
relation_matrix = torch.bmm(relation_matrix, diag)
H_0 = self.linear_weight0(F_0)
H_l = self.gcn_layer0(relation_matrix, H_0)
H_l = self.drop0(H_l)
H_l = self.gcn_layer1(relation_matrix, H_l)
H_l = self.drop1(H_l)
H_l = self.gcn_layer2(relation_matrix, H_l)
ret = torch.transpose(H_l, 1, 2)
ret = self.avg_pool(ret)
ret = torch.transpose(ret, 1, 2)
ret = ret.contiguous()
return ret
class SceneEncoder(nn.Module):
"""
Encoder of Scene Branch.
"""
def __init__(self, T, d_2D, d_3D, d_model, drop_probability=0.5):
super(SceneEncoder, self).__init__()
self.T = T
self.d_2d = d_2D
self.d_3d = d_3D
self.d_model = d_model
self.drop_probability = drop_probability
self.w_2d = nn.Sequential(
nn.Linear(in_features=d_2D, out_features=d_model, bias=False),
nn.BatchNorm1d(num_features=T),
nn.ReLU()
)
self.w_3d = nn.Sequential(
nn.Linear(in_features=d_3D, out_features=d_model, bias=False),
nn.BatchNorm1d(num_features=T),
nn.ReLU()
)
self.w_fuse = nn.Sequential(
nn.Linear(in_features=d_model * 2, out_features=d_model, bias=False),
nn.BatchNorm1d(num_features=T),
nn.ReLU()
)
self.dropout = nn.Dropout(p=drop_probability)
def forward(self, F_2D, F_3D):
"""
Args:
F_2D: (batch_size, T, d_2d)
F_3D: (batch_size, T, d_3d)
Returns:
F_s: (batch_size, T, d_model)
"""
f_2d = self.w_2d(F_2D)
f_2d = self.dropout(f_2d)
f_3d = self.w_3d(F_3D)
f_3d = self.dropout(f_3d)
f_s = self.w_fuse(torch.cat([f_2d, f_3d], dim=-1))
return f_s
if __name__ == '__main__':
batch_size = 64
N = 50
d_2d = 1024
d_model = 512
d2d = 2048
d3d = 1024
# s_encoder = SceneEncoder(T=10, d_2D=d2d, d_3D=d3d, d_model=512)
# F_2d = torch.ones((batch_size, 10, d2d))
# F_3d = torch.ones((batch_size, 10, d3d))
# out = s_encoder(F_2d, F_3d)
# print(out.shape)
G_st = torch.ones((batch_size, N, N))
F_0 = torch.ones((batch_size, N, d_2d))
temp_gcn = GCN(in_feature_size=d_2d, out_feature_size=d_model, N=N)
out = temp_gcn(G_st, F_0)
print(out.shape)
| [
"hadrianus_1@163.com"
] | hadrianus_1@163.com |
78a9efb7a3cd7aadec787b2cb1553dc652e4eb6b | 472284579f589e993ea5d73c9c6ac9cdc386cb4a | /.buildozer/android/app/consent_form.py | 68a1784eaf8ffa29fdd4788560495c711afea996 | [] | no_license | CuriosityLabTAU/free_exploration_study_keren | f52b8ebda0e939a809995dc13b76e920655c4a40 | 0c2386cfce42188d2854344fdeb5469fd22d61f9 | refs/heads/master | 2021-01-11T19:13:06.463289 | 2018-01-28T17:06:55 | 2018-01-28T17:06:55 | 79,333,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.properties import ObjectProperty
from kivy.utils import get_color_from_hex
from hebrew_management import HebrewManagement
from kivy_communication.logged_widgets import *
from kivy.storage.jsonstore import JsonStore
class ConsentCheckBox(LoggedCheckBox):
the_form = None
def on_press(self, *args):
super(ConsentCheckBox, self).on_press(*args)
if self.the_form:
self.the_form.mark_checkbox()
class ConsentButton(LoggedButton):
the_form = None
def on_press(self, *args):
super(ConsentButton, self).on_press(*args)
if self.the_form:
self.the_form.contin()
class ConsentForm(BoxLayout):
title=ObjectProperty()
body=ObjectProperty()
checkbox_agree=ObjectProperty()
checkbox_txt=ObjectProperty()
button=ObjectProperty()
the_app = None
dict = None
body_labels = None
def __init__(self, the_app):
super(ConsentForm, self).__init__()
self.the_app = the_app
self.dict = {'title':self.title,
'body':self.body,
'checkbox_txt':self.checkbox_txt,
'button':self.button}
store = JsonStore('consent_form.json').get('agreement')
for key, value in store.items():
self.dict[key].text = value[::-1]
self.body_labels = []
txt = self.dict['body'].text
new_lines = HebrewManagement.multiline(txt, 50)
for nl in new_lines[::-1]:
self.body_labels.append(Label(text=nl,
font_name="fonts/the_font",
font_size=36,
color=[0,0,0,1]))
self.dict['body'].add_widget(self.body_labels[-1])
def start(self, the_app):
self.button.disabled = True
self.checkbox_agree.active = False
self.button.background_color = (0, 0.71, 1, 1)
def contin(self):
if self.checkbox_agree.active:
# the next screen is the game
# start the clock and then transition
self.the_app.cg.start()
self.the_app.sm.current = self.the_app.sm.next()#"thegame"
else:
print("pls mark checkbox")
def mark_checkbox(self):
if self.checkbox_agree.active:
self.button.background_color = (0, 0.71, 1, 1)
# self.button.background_color = (0.71, 0, 1., 1)
self.button.disabled = False
else:
self.button.disabled = True
def get_color_from_hex(self, color):
return get_color_from_hex(color)
| [
"goren@gorengordon.com"
] | goren@gorengordon.com |
0132b83eb375405710b8474c5bb5f51481fc9ca7 | c74cc826757a8de9b818f3ac78073e6a95da48cd | /test2.py | 73b7b17707872eec99c7c53ad2a6145efc6f14d3 | [] | no_license | ShivaramPrasad/python_stuffs | 06cc418b99255e99fd36d9d719f1daedc6ad4063 | b5dc46889b281154c94ee9376262f88616d558b1 | refs/heads/master | 2020-03-06T14:16:26.051570 | 2018-03-27T05:24:35 | 2018-03-27T05:24:35 | 126,932,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | import pyaudio
import wave
import sys
# length of data to read.
chunk = 1024
# validation. If a wave file hasn't been specified, exit.
if len(sys.argv) < 2:
print "Plays a wave file.\n\n" +\
"Usage: %s filename.wav" % sys.argv[0]
sys.exit(-1)
'''
************************************************************************
This is the start of the "minimum needed to read a wave"
************************************************************************
'''
# open the file for reading.
wf = wave.open(sys.argv[1], 'rb')
# create an audio object
p = pyaudio.PyAudio()
# open stream based on the wave object which has been input.
stream = p.open(format =
p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True)
# read data (based on the chunk size)
data = wf.readframes(chunk)
# play stream (looping from beginning of file to the end)
while data != '':
# writing to the stream is what *actually* plays the sound.
stream.write(data)
data = wf.readframes(chunk)
# cleanup stuff.
stream.close()
p.terminate()
| [
"Shivaram1995@gmail.com"
] | Shivaram1995@gmail.com |
5b21e3f4df19ea2e21f949389e277170a75c1d81 | 67a10f3384d5048bbc0e46c0535b0c113d78c2fa | /examples/pix2pose/pipelines.py | ee429967056c92a86ebea3aaa6a03408cb222e27 | [
"MIT"
] | permissive | DeepanChakravarthiPadmanabhan/fer | af9bc6b65bf6d265c63d107b0f11ab0c09002390 | 920268633aa0643416551212ec2d70f3591b5001 | refs/heads/master | 2023-09-05T03:04:50.468845 | 2021-11-09T23:42:54 | 2021-11-09T23:42:54 | 426,337,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,543 | py | import numpy as np
import os
import glob
import random
from tensorflow.keras.utils import Sequence
from paz.abstract import SequentialProcessor, Processor
from paz.abstract.sequence import SequenceExtra
from paz.pipelines import RandomizeRenderedImage
from paz import processors as pr
class GeneratedImageProcessor(Processor):
"""
Loads pre-generated images
"""
def __init__(self, path_images, background_images_paths, num_occlusions=1, split=pr.TRAIN, no_ambiguities=False):
super(GeneratedImageProcessor, self).__init__()
self.copy = pr.Copy()
self.augment = RandomizeRenderedImage(background_images_paths, num_occlusions)
preprocessors_input = [pr.NormalizeImage()]
preprocessors_output = [NormalizeImageTanh()]
self.preprocess_input = SequentialProcessor(preprocessors_input)
self.preprocess_output = SequentialProcessor(preprocessors_output)
self.split = split
# Total number of images
self.num_images = len(glob.glob(os.path.join(path_images, "image_original/*")))
# Load all images into memory to save time
self.images_original = [np.load(os.path.join(path_images, "image_original/image_original_{}.npy".format(str(i).zfill(7)))) for i in range(self.num_images)]
if no_ambiguities:
self.images_colors = [np.load(os.path.join(path_images, "image_colors_no_ambiguities/image_colors_no_ambiguities_{}.npy".format(str(i).zfill(7)))) for i in range(self.num_images)]
else:
self.images_colors = [np.load(os.path.join(path_images, "image_colors/image_colors_{}.npy".format(str(i).zfill(7)))) for i in range(self.num_images)]
self.alpha_original = [np.load(os.path.join(path_images, "alpha_original/alpha_original_{}.npy".format(str(i).zfill(7)))) for i in range(self.num_images)]
def call(self):
index = random.randint(0, self.num_images-1)
image_original = self.images_original[index]
image_colors = self.images_colors[index]
alpha_original = self.alpha_original[index]
if self.split == pr.TRAIN:
image_original = self.augment(image_original, alpha_original)
image_original = self.preprocess_input(image_original)
image_colors = self.preprocess_output(image_colors)
return image_original, image_colors
class GeneratedImageGenerator(SequentialProcessor):
def __init__(self, path_images, size, background_images_paths, num_occlusions=1, split=pr.TRAIN):
super(GeneratedImageGenerator, self).__init__()
self.add(GeneratedImageProcessor(
path_images, background_images_paths, num_occlusions, split))
self.add(pr.SequenceWrapper(
{0: {'input_image': [size, size, 3]}},
{1: {'color_output': [size, size, 3]}, 0: {'error_output': [size, size, 1]}}))
"""
Creates a batch of train data for the discriminator. For real images the label is 1,
for fake images the label is 0
"""
def make_batch_discriminator(generator, input_images, color_output_images, label):
if label == 1:
return color_output_images, np.ones(len(color_output_images))
elif label == 0:
predictions = generator.predict(input_images)
return predictions[0], np.zeros(len(predictions[0]))
class GeneratingSequencePix2Pose(SequenceExtra):
    """Sequence generator used for generating samples.
    Unfortunately the GeneratingSequence class from paz.abstract cannot be used here. Reason: not all of
    the training data is available right at the start. The error images depend on the predicted color images,
    so that they have to be generated on-the-fly during training. This is done here.
    # Arguments
        processor: Function used for generating and processing ``samples``.
        model: Keras model
        batch_size: Int.
        num_steps: Int. Number of steps for each epoch.
        as_list: Bool, if True ``inputs`` and ``labels`` are dispatched as
            lists. If false ``inputs`` and ``labels`` are dispatched as
            dictionaries.
    """
    def __init__(self, processor, model, batch_size, num_steps, as_list=False, rotation_matrices=None):
        self.num_steps = num_steps
        self.model = model
        # Candidate object rotations tried in process_batch to resolve
        # pose ambiguity when computing the error image.
        self.rotation_matrices = rotation_matrices
        super(GeneratingSequencePix2Pose, self).__init__(
            processor, batch_size, as_list)
    def __len__(self):
        # One epoch consists of num_steps batches.
        return self.num_steps
    def rotate_image(self, image, rotation_matrix):
        """Rotate the per-pixel color vectors of the object, keeping the background fixed.

        Pixels whose channel sum equals -3 (i.e. all three channels are -1)
        are treated as background and stay at -1 after rotation.
        """
        # Foreground mask: 1.0 where the channel sum differs from -3.
        mask_image = np.ma.masked_not_equal(np.sum(image, axis=-1), -1.*3).mask.astype(float)
        mask_image = np.repeat(mask_image[..., np.newaxis], 3, axis=-1)
        mask_background = np.ones_like(mask_image) - mask_image
        # Rotate the object
        image_rotated = np.einsum('ij,klj->kli', rotation_matrix, image)
        image_rotated *= mask_image
        image_rotated += (mask_background * -1.)
        return image_rotated
    def process_batch(self, inputs, labels, batch_index):
        """Build one batch, generating the error images on-the-fly.

        For every sample the color target is rotated by each candidate
        rotation matrix; the rotation with the smallest absolute error
        against the model's prediction provides the error-image label.
        """
        input_images, samples = list(), list()
        for sample_arg in range(self.batch_size):
            sample = self.pipeline()
            samples.append(sample)
            input_image = sample['inputs'][self.ordered_input_names[0]]
            input_images.append(input_image)
        input_images = np.asarray(input_images)
        # This line is very important. If model.predict(...) is used instead the results are wrong.
        # Reason: BatchNormalization behaves differently, depending on whether it is in train or
        # inference mode. model.predict(...) is the inference mode, so the predictions here will
        # be different from the predictions the model is trained on --> Result: the error images
        # generated here are also wrong
        predictions = self.model(input_images, training=True)
        # Calculate the errors between the target output and the predicted output
        for sample_arg in range(self.batch_size):
            sample = samples[sample_arg]
            # List of tuples of the form (error, error_image)
            stored_errors = []
            # Iterate over all rotation matrices to find the object position
            # with the smallest error
            for rotation_matrix in self.rotation_matrices:
                color_image_rotated = self.rotate_image(sample['labels']['color_output'], rotation_matrix)
                # NOTE(review): channels are summed before abs, so opposite-signed
                # channel errors can cancel within a pixel — confirm this matches
                # the intended Pix2Pose error definition.
                error_image = np.sum(predictions['color_output'][sample_arg] - color_image_rotated, axis=-1, keepdims=True)
                error_value = np.sum(np.abs(error_image))
                stored_errors.append((error_value, error_image))
            # Select the error image with the smallest error
            minimal_error_pair = min(stored_errors, key=lambda t: t[0])
            sample['labels'][self.ordered_label_names[0]] = minimal_error_pair[1]
            self._place_sample(sample['inputs'], sample_arg, inputs)
            self._place_sample(sample['labels'], sample_arg, labels)
        return inputs, labels
class NormalizeImageTanh(Processor):
    """Scale an image from [0, 255] into the tanh output range [-1, 1]."""
    def __init__(self):
        super(NormalizeImageTanh, self).__init__()
    def call(self, image):
        # Map 0 -> -1 and 255 -> 1.
        scaled = image / 127.5
        return scaled - 1
class DenormalizeImageTanh(Processor):
    """Map a tanh-range image from [-1, 1] back to pixel values in [0, 255]."""
    def __init__(self):
        super(DenormalizeImageTanh, self).__init__()
    def call(self, image):
        # Inverse of the tanh normalization: -1 -> 0 and 1 -> 255.
        shifted = image + 1.0
        return shifted * 127.5
| [
"deepangrad@gmail.com"
] | deepangrad@gmail.com |
ddf3fe9c16c512db4db55225662acb8fedc66bab | c803b2c8aa43d4c34eba62794af957330e2c5317 | /src/extract_labels.py | 9bd749e9ccb5cdaa149e3f16ade8ae04117bb93f | [] | no_license | utexas-bwi/perception_classifiers | c2da4420ca6d92a06215aa20f8b3dfd3aa628e0f | 15321f73b571daf5fb1d6091870116f04d3ccb08 | refs/heads/master | 2021-09-05T11:44:13.057988 | 2017-07-27T16:55:30 | 2017-07-27T16:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | #!/usr/bin/env python
__author__ = 'jesse'
import pickle
import sys
# python extract_labels.py
# [agent_pickle]
# [output_fn]
def main():
    """Dump per-object predicate label counts from a pickled agent.

    Usage: python extract_labels.py [agent_pickle] [output_fn]

    Loads the pickled agent given as the first CLI argument and writes a
    CSV (second argument) with one row per (predicate, object) pair,
    counting positive and negative labels.
    """
    # get arguments
    agent_fn = sys.argv[1]
    out_fn = sys.argv[2]
    # load agent; 'rb' because the agent was pickled in binary mode.
    # 'with' guarantees the handle is closed even if unpickling fails.
    with open(agent_fn, 'rb') as f:
        a = pickle.load(f)
    # write out labels for each object
    with open(out_fn, 'w') as f:
        f.write("predicate,object_id,num_true_labels,num_false_labels\n")
        for pred in a.predicate_examples:
            for oidx in a.predicate_examples[pred]:
                labels = a.predicate_examples[pred][oidx]
                # count truthy entries as positive labels, the rest as negative
                tr = sum(1 for b in labels if b)
                fa = len(labels) - tr
                f.write(','.join([pred, str(oidx), str(tr), str(fa)])+'\n')


if __name__ == "__main__":
    main()
| [
"jesse@cs.utexas.edu"
] | jesse@cs.utexas.edu |
d526bcd601974fc1ebcbe80a5e2954a3412cb522 | 5d9932a1abeae21b8201368e5cf465680f106761 | /data_ccxt/probit.py | 330b5839012523f090ae27ca23e35c190244345b | [] | no_license | qqzhangjian789/text | 5dc6086e55d8a9494b889fa40cc9730da6bf5940 | 938be0df0a965aacf13cfb942548b8d2a1c7cec0 | refs/heads/master | 2023-05-04T11:38:47.178345 | 2021-05-21T17:44:13 | 2021-05-21T17:44:13 | 286,178,737 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 48,360 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from data_ccxt.base.exchange import Exchange
import math
from data_ccxt.base.errors import ExchangeError
from data_ccxt.base.errors import AuthenticationError
from data_ccxt.base.errors import ArgumentsRequired
from data_ccxt.base.errors import BadRequest
from data_ccxt.base.errors import BadSymbol
from data_ccxt.base.errors import BadResponse
from data_ccxt.base.errors import InsufficientFunds
from data_ccxt.base.errors import InvalidAddress
from data_ccxt.base.errors import InvalidOrder
from data_ccxt.base.errors import DDoSProtection
from data_ccxt.base.errors import RateLimitExceeded
from data_ccxt.base.errors import ExchangeNotAvailable
from data_ccxt.base.decimal_to_precision import TRUNCATE
from data_ccxt.base.decimal_to_precision import TICK_SIZE
class probit(Exchange):
    def describe(self):
        """Return the static exchange description: ids, endpoints, fees,
        timeframes, error-code mapping and exchange-specific options,
        deep-merged over the base Exchange description."""
        return self.deep_extend(super(probit, self).describe(), {
            'id': 'probit',
            'name': 'ProBit',
            'countries': ['SC', 'KR'],  # Seychelles, South Korea
            'rateLimit': 250,  # ms
            'has': {
                'CORS': True,
                'fetchTime': True,
                'fetchMarkets': True,
                'fetchCurrencies': True,
                'fetchTickers': True,
                'fetchTicker': True,
                'fetchOHLCV': True,
                'fetchOrderBook': True,
                'fetchTrades': True,
                'fetchBalance': True,
                'createOrder': True,
                'createMarketOrder': True,
                'cancelOrder': True,
                'fetchOrder': True,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchMyTrades': True,
                'fetchDepositAddress': True,
                'withdraw': True,
                'signIn': True,
            },
            # unified timeframe -> exchange interval code
            'timeframes': {
                '1m': '1m',
                '3m': '3m',
                '5m': '5m',
                '10m': '10m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '4h': '4h',
                '6h': '6h',
                '12h': '12h',
                '1d': '1D',
                '1w': '1W',
                '1M': '1M',
            },
            'version': 'v1',
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/79268032-c4379480-7ea2-11ea-80b3-dd96bb29fd0d.jpg',
                'api': {
                    # auth tokens come from a separate accounts host
                    'accounts': 'https://accounts.probit.com',
                    'public': 'https://api.probit.com/api/exchange',
                    'private': 'https://api.probit.com/api/exchange',
                },
                'www': 'https://www.probit.com',
                'doc': [
                    'https://docs-en.probit.com',
                    'https://docs-ko.probit.com',
                ],
                'fees': 'https://support.probit.com/hc/en-us/articles/360020968611-Trading-Fees',
                'referral': 'https://www.probit.com/r/34608773',
            },
            'api': {
                'public': {
                    'get': [
                        'market',
                        'currency',
                        'currency_with_platform',
                        'time',
                        'ticker',
                        'order_book',
                        'trade',
                        'candle',
                    ],
                },
                'private': {
                    'post': [
                        'new_order',
                        'cancel_order',
                        'withdrawal',
                    ],
                    'get': [
                        'balance',
                        'order',
                        'open_order',
                        'order_history',
                        'trade_history',
                        'deposit_address',
                    ],
                },
                'accounts': {
                    'post': [
                        'token',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.2 / 100,
                },
            },
            # exchange error code -> unified ccxt exception class
            'exceptions': {
                'exact': {
                    'UNAUTHORIZED': AuthenticationError,
                    'INVALID_ARGUMENT': BadRequest,  # Parameters are not a valid format, parameters are empty, or out of range, or a parameter was sent when not required.
                    'TRADING_UNAVAILABLE': ExchangeNotAvailable,
                    'NOT_ENOUGH_BALANCE': InsufficientFunds,
                    'NOT_ALLOWED_COMBINATION': BadRequest,
                    'INVALID_ORDER': InvalidOrder,  # Requested order does not exist, or it is not your order
                    'RATE_LIMIT_EXCEEDED': RateLimitExceeded,  # You are sending requests too frequently. Please try it later.
                    'MARKET_UNAVAILABLE': ExchangeNotAvailable,  # Market is closed today
                    'INVALID_MARKET': BadSymbol,  # Requested market is not exist
                    'MARKET_CLOSED': BadSymbol,  # {"errorCode":"MARKET_CLOSED"}
                    'INVALID_CURRENCY': BadRequest,  # Requested currency is not exist on ProBit system
                    'TOO_MANY_OPEN_ORDERS': DDoSProtection,  # Too many open orders
                    'DUPLICATE_ADDRESS': InvalidAddress,  # Address already exists in withdrawal address list
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
            },
            'precisionMode': TICK_SIZE,
            'options': {
                'createMarketBuyOrderRequiresPrice': True,
                # default time-in-force per order type
                'timeInForce': {
                    'limit': 'gtc',
                    'market': 'ioc',
                },
            },
            'commonCurrencies': {
                'BTCBEAR': 'BEAR',
                'BTCBULL': 'BULL',
                'CBC': 'CryptoBharatCoin',
                'HBC': 'Hybrid Bank Cash',
                'UNI': 'UNICORN Token',
            },
        })
    def fetch_markets(self, params={}):
        """Fetch all ProBit markets and parse them into unified market structures.

        Returns a list of dicts with unified keys (symbol, base, quote,
        precision, limits, fees, active) built from GET /market.
        """
        response = self.publicGetMarket(params)
        #
        # {
        #     "data":[
        #         {
        #             "id":"MONA-USDT",
        #             "base_currency_id":"MONA",
        #             "quote_currency_id":"USDT",
        #             "min_price":"0.001",
        #             "max_price":"9999999999999999",
        #             "price_increment":"0.001",
        #             "min_quantity":"0.0001",
        #             "max_quantity":"9999999999999999",
        #             "quantity_precision":4,
        #             "min_cost":"1",
        #             "max_cost":"9999999999999999",
        #             "cost_precision":8,
        #             "taker_fee_rate":"0.2",
        #             "maker_fee_rate":"0.2",
        #             "show_in_ui":true,
        #             "closed":false
        #         },
        #     ]
        # }
        #
        markets = self.safe_value(response, 'data', [])
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'id')
            baseId = self.safe_string(market, 'base_currency_id')
            quoteId = self.safe_string(market, 'quote_currency_id')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            closed = self.safe_value(market, 'closed', False)
            active = not closed
            amountPrecision = self.safe_integer(market, 'quantity_precision')
            costPrecision = self.safe_integer(market, 'cost_precision')
            # precisionMode is TICK_SIZE: convert decimal-digit counts to tick
            # sizes (10^-n); price already comes as an explicit increment
            precision = {
                'amount': 1 / math.pow(10, amountPrecision),
                'price': self.safe_float(market, 'price_increment'),
                'cost': 1 / math.pow(10, costPrecision),
            }
            # fee rates are reported in percent; convert to fractions
            takerFeeRate = self.safe_float(market, 'taker_fee_rate')
            makerFeeRate = self.safe_float(market, 'maker_fee_rate')
            result.append({
                'id': id,
                'info': market,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': active,
                'precision': precision,
                'taker': takerFeeRate / 100,
                'maker': makerFeeRate / 100,
                'limits': {
                    'amount': {
                        'min': self.safe_float(market, 'min_quantity'),
                        'max': self.safe_float(market, 'max_quantity'),
                    },
                    'price': {
                        'min': self.safe_float(market, 'min_price'),
                        'max': self.safe_float(market, 'max_price'),
                    },
                    'cost': {
                        'min': self.safe_float(market, 'min_cost'),
                        'max': self.safe_float(market, 'max_cost'),
                    },
                },
            })
        return result
    def fetch_currencies(self, params={}):
        """Fetch all currencies with their deposit/withdrawal platforms.

        Uses GET /currency_with_platform; for multi-platform currencies the
        platform with the lowest `priority` value provides precision, limits
        and the default withdrawal fee.
        """
        response = self.publicGetCurrencyWithPlatform(params)
        #
        # {
        #     "data":[
        #         {
        #             "id":"USDT",
        #             "display_name":{"ko-kr":"테더","en-us":"Tether"},
        #             "show_in_ui":true,
        #             "platform":[
        #                 {
        #                     "id":"ETH",
        #                     "priority":1,
        #                     "deposit":true,
        #                     "withdrawal":true,
        #                     "currency_id":"USDT",
        #                     "precision":6,
        #                     "min_confirmation_count":15,
        #                     "require_destination_tag":false,
        #                     "display_name":{"name":{"ko-kr":"ERC-20","en-us":"ERC-20"}},
        #                     "min_deposit_amount":"0",
        #                     "min_withdrawal_amount":"1",
        #                     "withdrawal_fee":[
        #                         {"amount":"0.01","priority":2,"currency_id":"ETH"},
        #                         {"amount":"1.5","priority":1,"currency_id":"USDT"},
        #                     ],
        #                     "deposit_fee":{},
        #                     "suspended_reason":"",
        #                     "deposit_suspended":false,
        #                     "withdrawal_suspended":false
        #                 },
        #                 {
        #                     "id":"OMNI",
        #                     "priority":2,
        #                     "deposit":true,
        #                     "withdrawal":true,
        #                     "currency_id":"USDT",
        #                     "precision":6,
        #                     "min_confirmation_count":3,
        #                     "require_destination_tag":false,
        #                     "display_name":{"name":{"ko-kr":"OMNI","en-us":"OMNI"}},
        #                     "min_deposit_amount":"0",
        #                     "min_withdrawal_amount":"5",
        #                     "withdrawal_fee":[{"amount":"5","priority":1,"currency_id":"USDT"}],
        #                     "deposit_fee":{},
        #                     "suspended_reason":"wallet_maintenance",
        #                     "deposit_suspended":false,
        #                     "withdrawal_suspended":false
        #                 }
        #             ],
        #             "stakeable":false,
        #             "unstakeable":false,
        #             "auto_stake":false,
        #             "auto_stake_amount":"0"
        #         }
        #     ]
        # }
        #
        currencies = self.safe_value(response, 'data')
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = self.safe_string(currency, 'id')
            code = self.safe_currency_code(id)
            displayName = self.safe_value(currency, 'display_name')
            name = self.safe_string(displayName, 'en-us')
            platforms = self.safe_value(currency, 'platform', [])
            platformsByPriority = self.sort_by(platforms, 'priority')
            # the highest-priority platform defines the unified currency fields
            platform = self.safe_value(platformsByPriority, 0, {})
            precision = self.safe_integer(platform, 'precision')
            depositSuspended = self.safe_value(platform, 'deposit_suspended')
            withdrawalSuspended = self.safe_value(platform, 'withdrawal_suspended')
            # active unless BOTH deposits and withdrawals are suspended
            active = not (depositSuspended and withdrawalSuspended)
            withdrawalFees = self.safe_value(platform, 'withdrawal_fee', {})
            fees = []
            # sometimes the withdrawal fee is an empty object
            # [{'amount': '0.015', 'priority': 1, 'currency_id': 'ETH'}, {}]
            for j in range(0, len(withdrawalFees)):
                withdrawalFee = withdrawalFees[j]
                amount = self.safe_float(withdrawalFee, 'amount')
                priority = self.safe_integer(withdrawalFee, 'priority')
                if (amount is not None) and (priority is not None):
                    fees.append(withdrawalFee)
            withdrawalFeesByPriority = self.sort_by(fees, 'priority')
            withdrawalFee = self.safe_value(withdrawalFeesByPriority, 0, {})
            fee = self.safe_float(withdrawalFee, 'amount')
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'name': name,
                'active': active,
                'fee': fee,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'deposit': {
                        'min': self.safe_float(platform, 'min_deposit_amount'),
                        'max': None,
                    },
                    'withdraw': {
                        'min': self.safe_float(platform, 'min_withdrawal_amount'),
                        'max': None,
                    },
                },
            }
        return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# data: [
# {
# "currency_id":"XRP",
# "total":"100",
# "available":"0",
# }
# ]
# }
#
data = self.safe_value(response, 'data')
result = {'info': data}
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency_id')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'total')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
response = self.publicGetOrderBook(self.extend(request, params))
#
# {
# data: [
# {side: 'buy', price: '0.000031', quantity: '10'},
# {side: 'buy', price: '0.00356007', quantity: '4.92156877'},
# {side: 'sell', price: '0.1857', quantity: '0.17'},
# ]
# }
#
data = self.safe_value(response, 'data', [])
dataBySide = self.group_by(data, 'side')
return self.parse_order_book(dataBySide, None, 'buy', 'sell', 'price', 'quantity')
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
marketIds = self.market_ids(symbols)
request['market_ids'] = ','.join(marketIds)
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_tickers(data, symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_ids': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
ticker = self.safe_value(data, 0)
if ticker is None:
raise BadResponse(self.id + ' fetchTicker() returned an empty response')
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
#
timestamp = self.parse8601(self.safe_string(ticker, 'time'))
marketId = self.safe_string(ticker, 'market_id')
symbol = self.safe_symbol(marketId, market, '-')
close = self.safe_float(ticker, 'last')
change = self.safe_float(ticker, 'change')
percentage = None
open = None
if change is not None:
if close is not None:
open = close - change
percentage = (change / open) * 100
baseVolume = self.safe_float(ticker, 'base_volume')
quoteVolume = self.safe_float(ticker, 'quote_volume')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None, # previous day close
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'limit': 100,
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
}
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.privateGetTradeHistory(self.extend(request, params))
#
# {
# data: [
# {
# "id":"BTC-USDT:183566",
# "order_id":"17209376",
# "side":"sell",
# "fee_amount":"0.657396569175",
# "fee_currency_id":"USDT",
# "status":"settled",
# "price":"6573.96569175",
# "quantity":"0.1",
# "cost":"657.396569175",
# "time":"2018-08-10T06:06:46.000Z",
# "market_id":"BTC-USDT"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'limit': 100,
'start_time': '1970-01-01T00:00:00.000Z',
'end_time': self.iso8601(self.milliseconds()),
}
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# {
# "data":[
# {
# "id":"ETH-BTC:3331886",
# "price":"0.022981",
# "quantity":"12.337",
# "time":"2020-04-12T20:55:42.371Z",
# "side":"sell",
# "tick_direction":"down"
# },
# {
# "id":"ETH-BTC:3331885",
# "price":"0.022982",
# "quantity":"6.472",
# "time":"2020-04-12T20:55:39.652Z",
# "side":"sell",
# "tick_direction":"down"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
    def parse_trade(self, trade, market=None):
        """Parse a raw public or private trade into the unified trade structure."""
        #
        # fetchTrades(public)
        #
        #     {
        #         "id":"ETH-BTC:3331886",
        #         "price":"0.022981",
        #         "quantity":"12.337",
        #         "time":"2020-04-12T20:55:42.371Z",
        #         "side":"sell",
        #         "tick_direction":"down"
        #     }
        #
        # fetchMyTrades(private)
        #
        #     {
        #         "id":"BTC-USDT:183566",
        #         "order_id":"17209376",
        #         "side":"sell",
        #         "fee_amount":"0.657396569175",
        #         "fee_currency_id":"USDT",
        #         "status":"settled",
        #         "price":"6573.96569175",
        #         "quantity":"0.1",
        #         "cost":"657.396569175",
        #         "time":"2018-08-10T06:06:46.000Z",
        #         "market_id":"BTC-USDT"
        #     }
        #
        timestamp = self.parse8601(self.safe_string(trade, 'time'))
        id = self.safe_string(trade, 'id')
        marketId = None
        # trade ids embed the market id as a prefix ("ETH-BTC:3331886");
        # an explicit market_id field (private trades) takes precedence
        if id is not None:
            parts = id.split(':')
            marketId = self.safe_string(parts, 0)
        marketId = self.safe_string(trade, 'market_id', marketId)
        symbol = self.safe_symbol(marketId, market, '-')
        side = self.safe_string(trade, 'side')
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'quantity')
        cost = None
        if price is not None:
            if amount is not None:
                cost = price * amount
        orderId = self.safe_string(trade, 'order_id')
        # fee fields are only present on private trades
        feeCost = self.safe_float(trade, 'fee_amount')
        fee = None
        if feeCost is not None:
            feeCurrencyId = self.safe_string(trade, 'fee_currency_id')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCost,
                'currency': feeCurrencyCode,
            }
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': orderId,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {"data":"2020-04-12T18:54:25.390Z"}
#
timestamp = self.parse8601(self.safe_string(response, 'data'))
return timestamp
    def normalize_ohlcv_timestamp(self, timestamp, timeframe, after=False):
        """Snap a millisecond timestamp to a candle boundary, returned as ISO8601.

        With after=True the NEXT boundary is returned (exclusive window end),
        otherwise the current/previous boundary. Months and weeks get special
        handling; all other timeframes are fixed-length.
        """
        duration = self.parse_timeframe(timeframe)
        if timeframe == '1M':
            # months have variable length: rebuild the first-of-month string by hand
            iso8601 = self.iso8601(timestamp)
            parts = iso8601.split('-')
            year = self.safe_string(parts, 0)
            month = self.safe_integer(parts, 1)
            if after:
                month = self.sum(month, 1)
            # NOTE(review): after=True in December yields month "13" here —
            # confirm how the endpoint handles that before relying on it
            if month < 10:
                month = '0' + str(month)
            else:
                month = str(month)
            return year + '-' + month + '-01T00:00:00.000Z'
        elif timeframe == '1w':
            # weeks are anchored to the first Sunday after the unix epoch
            timestamp = int(timestamp / 1000)
            firstSunday = 259200  # 1970-01-04T00:00:00.000Z
            difference = timestamp - firstSunday
            numWeeks = self.integer_divide(difference, duration)
            previousSunday = self.sum(firstSunday, numWeeks * duration)
            if after:
                previousSunday = self.sum(previousSunday, duration)
            return self.iso8601(previousSunday * 1000)
        else:
            # fixed-length timeframes: floor to the duration (or the next
            # boundary when after=True)
            timestamp = int(timestamp / 1000)
            timestamp = duration * int(timestamp / duration)
            if after:
                timestamp = self.sum(timestamp, duration)
            return self.iso8601(timestamp * 1000)
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a market.

        The endpoint requires an explicit [start_time, end_time] window, so
        the window is derived from `since`/`limit` (one of them must be
        given) and snapped to candle boundaries.
        """
        self.load_markets()
        market = self.market(symbol)
        interval = self.timeframes[timeframe]
        limit = 100 if (limit is None) else limit
        # request one extra candle because 'asc' ordering always includes
        # the start_time boundary candle
        requestLimit = self.sum(limit, 1)
        requestLimit = min(1000, requestLimit)  # max 1000
        request = {
            'market_ids': market['id'],
            'interval': interval,
            'sort': 'asc',  # 'asc' will always include the start_time, 'desc' will always include end_time
            'limit': requestLimit,  # max 1000
        }
        now = self.milliseconds()
        duration = self.parse_timeframe(timeframe)
        startTime = since
        endTime = now
        if since is None:
            if limit is None:
                raise ArgumentsRequired(self.id + ' fetchOHLCV() requires either a since argument or a limit argument')
            else:
                # no since: take the last `limit` candles ending now
                startTime = now - limit * duration * 1000
        else:
            if limit is None:
                endTime = now
            else:
                # since + limit: window covers `limit` candles from `since`
                endTime = self.sum(since, self.sum(limit, 1) * duration * 1000)
        startTimeNormalized = self.normalize_ohlcv_timestamp(startTime, timeframe)
        endTimeNormalized = self.normalize_ohlcv_timestamp(endTime, timeframe, True)
        request['start_time'] = startTimeNormalized
        request['end_time'] = endTimeNormalized
        response = self.publicGetCandle(self.extend(request, params))
        #
        #     {
        #         "data":[
        #             {
        #                 "market_id":"ETH-BTC",
        #                 "open":"0.02811",
        #                 "close":"0.02811",
        #                 "low":"0.02811",
        #                 "high":"0.02811",
        #                 "base_volume":"0.0005",
        #                 "quote_volume":"0.000014055",
        #                 "start_time":"2018-11-30T18:19:00.000Z",
        #                 "end_time":"2018-11-30T18:20:00.000Z"
        #             },
        #         ]
        #     }
        #
        data = self.safe_value(response, 'data', [])
        return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "market_id":"ETH-BTC",
# "open":"0.02811",
# "close":"0.02811",
# "low":"0.02811",
# "high":"0.02811",
# "base_volume":"0.0005",
# "quote_volume":"0.000014055",
# "start_time":"2018-11-30T18:19:00.000Z",
# "end_time":"2018-11-30T18:20:00.000Z"
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'start_time')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'base_volume'),
]
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
since = self.parse8601(since)
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
response = self.privateGetOpenOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
'limit': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since:
request['start_time'] = self.iso8601(since)
if limit:
request['limit'] = limit
response = self.privateGetOrderHistory(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
else:
request['order_id'] = id
query = self.omit(params, ['clientOrderId', 'client_order_id'])
response = self.privateGetOrder(self.extend(request, query))
data = self.safe_value(response, 'data', [])
order = self.safe_value(data, 0)
return self.parse_order(order, market)
def parse_order_status(self, status):
statuses = {
'open': 'open',
'cancelled': 'canceled',
'filled': 'closed',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Parse a raw order into the unified order structure."""
        #
        #     {
        #         id: string,
        #         user_id: string,
        #         market_id: string,
        #         type: 'orderType',
        #         side: 'side',
        #         quantity: string,
        #         limit_price: string,
        #         time_in_force: 'timeInForce',
        #         filled_cost: string,
        #         filled_quantity: string,
        #         open_quantity: string,
        #         cancelled_quantity: string,
        #         status: 'orderStatus',
        #         time: 'date',
        #         client_order_id: string,
        #     }
        #
        status = self.parse_order_status(self.safe_string(order, 'status'))
        id = self.safe_string(order, 'id')
        type = self.safe_string(order, 'type')
        side = self.safe_string(order, 'side')
        marketId = self.safe_string(order, 'market_id')
        symbol = self.safe_symbol(marketId, market, '-')
        timestamp = self.parse8601(self.safe_string(order, 'time'))
        price = self.safe_float(order, 'limit_price')
        filled = self.safe_float(order, 'filled_quantity')
        remaining = self.safe_float(order, 'open_quantity')
        # open and cancelled quantities together form the unfilled remainder
        canceledAmount = self.safe_float(order, 'cancelled_quantity')
        if canceledAmount is not None:
            remaining = self.sum(remaining, canceledAmount)
        # fall back to filled + remaining when 'quantity' is absent
        amount = self.safe_float(order, 'quantity', self.sum(filled, remaining))
        cost = self.safe_float_2(order, 'filled_cost', 'cost')
        # market orders carry no meaningful limit_price
        if type == 'market':
            price = None
        clientOrderId = self.safe_string(order, 'client_order_id')
        if clientOrderId == '':
            clientOrderId = None
        timeInForce = self.safe_string_upper(order, 'time_in_force')
        return self.safe_order({
            'id': id,
            'info': order,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'side': side,
            'status': status,
            'price': price,
            'stopPrice': None,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'average': None,
            'cost': cost,
            'fee': None,
            'trades': None,
        })
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Create a limit or market order.

        For market buys the API expects the quote-currency cost; it is
        either taken from params['cost'] or computed as amount * price
        (controlled by options['createMarketBuyOrderRequiresPrice']).
        """
        self.load_markets()
        market = self.market(symbol)
        # default time-in-force per order type comes from options['timeInForce']
        options = self.safe_value(self.options, 'timeInForce')
        defaultTimeInForce = self.safe_value(options, type)
        timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force', defaultTimeInForce)
        request = {
            'market_id': market['id'],
            'type': type,
            'side': side,
            'time_in_force': timeInForce,
        }
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
        if clientOrderId is not None:
            request['client_order_id'] = clientOrderId
        costToPrecision = None
        if type == 'limit':
            request['limit_price'] = self.price_to_precision(symbol, price)
            request['quantity'] = self.amount_to_precision(symbol, amount)
        elif type == 'market':
            # for market buy it requires the amount of quote currency to spend
            if side == 'buy':
                cost = self.safe_float(params, 'cost')
                createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
                if createMarketBuyOrderRequiresPrice:
                    if price is not None:
                        if cost is None:
                            cost = amount * price
                    elif cost is None:
                        raise InvalidOrder(self.id + " createOrder() requires the price argument for market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'cost' extra parameter(the exchange-specific behaviour)")
                else:
                    # requires-price disabled: 'amount' already means quote cost
                    cost = amount if (cost is None) else cost
                costToPrecision = self.cost_to_precision(symbol, cost)
                request['cost'] = costToPrecision
            else:
                request['quantity'] = self.amount_to_precision(symbol, amount)
        query = self.omit(params, ['timeInForce', 'time_in_force', 'clientOrderId', 'client_order_id'])
        response = self.privatePostNewOrder(self.extend(request, query))
        #
        #     {
        #         data: {
        #             id: string,
        #             user_id: string,
        #             market_id: string,
        #             type: 'orderType',
        #             side: 'side',
        #             quantity: string,
        #             limit_price: string,
        #             time_in_force: 'timeInForce',
        #             filled_cost: string,
        #             filled_quantity: string,
        #             open_quantity: string,
        #             cancelled_quantity: string,
        #             status: 'orderStatus',
        #             time: 'date',
        #             client_order_id: string,
        #         }
        #     }
        #
        data = self.safe_value(response, 'data')
        order = self.parse_order(data, market)
        # a workaround for incorrect huge amounts
        # returned by the exchange on market buys
        if (type == 'market') and (side == 'buy'):
            order['amount'] = None
            order['cost'] = float(costToPrecision)
            order['remaining'] = None
        return order
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'order_id': id,
}
response = self.privatePostCancelOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data)
def parse_deposit_address(self, depositAddress, currency=None):
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'destination_tag')
currencyId = self.safe_string(depositAddress, 'currency_id')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency_id': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
#
# {
# "data":[
# {
# "currency_id":"ETH",
# "address":"0x12e2caf3c4051ba1146e612f532901a423a9898a",
# "destination_tag":null
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
firstAddress = self.safe_value(data, 0)
if firstAddress is None:
raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response')
return self.parse_deposit_address(firstAddress, currency)
def fetch_deposit_addresses(self, codes=None, params={}):
        """Fetch deposit addresses for multiple currencies in one call.

        :param codes: optional list of unified currency codes; when omitted
            the endpoint returns addresses for all currencies
        :param dict params: extra parameters merged into the endpoint request
        :returns: a list of unified deposit-address structures
        """
        self.load_markets()
        request = {}
        if codes:
            currencyIds = []
            for i in range(0, len(codes)):
                currency = self.currency(codes[i])
                currencyIds.append(currency['id'])
            # bug fix: the endpoint expects exchange-specific currency ids,
            # not the unified codes that were previously joined here
            request['currency_id'] = ','.join(currencyIds)
        response = self.privateGetDepositAddress(self.extend(request, params))
        data = self.safe_value(response, 'data', [])
        return self.parse_deposit_addresses(data)
def withdraw(self, code, amount, address, tag=None, params={}):
        """Request a withdrawal to a pre-registered address.

        In order to use this method you need to allow API withdrawal from the
        API Settings Page, and register the list of withdrawal addresses and
        destination tags on the API Settings page; you can only withdraw to
        the registered addresses using the API.
        """
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        # the endpoint expects a string tag; send an empty one when absent
        if tag is None:
            tag = ''
        request = {
            'currency_id': currency['id'],
            # 'platform_id': 'ETH', # if omitted it will use the default platform for the currency
            'address': address,
            'destination_tag': tag,
            'amount': self.currency_to_precision(code, amount),
            # which currency to pay the withdrawal fees
            # only applicable for currencies that accepts multiple withdrawal fee options
            # 'fee_currency_id': 'ETH', # if omitted it will use the default fee policy for each currency
            # whether the amount field includes fees
            # 'include_fee': False, # makes sense only when fee_currency_id is equal to currency_id
        }
        response = self.privatePostWithdrawal(self.extend(request, params))
        data = self.safe_value(response, 'data')
        return self.parse_transaction(data, currency)
def parse_transaction(self, transaction, currency=None):
        """Convert a raw deposit/withdrawal record into the unified ccxt
        transaction structure; the raw record is preserved under 'info'."""
        txId = self.safe_string(transaction, 'id')
        txAmount = self.safe_float(transaction, 'amount')
        txAddress = self.safe_string(transaction, 'address')
        txTag = self.safe_string(transaction, 'destination_tag')
        txHash = self.safe_string(transaction, 'hash')
        txTimestamp = self.parse8601(self.safe_string(transaction, 'time'))
        txType = self.safe_string(transaction, 'type')
        unifiedCode = self.safe_currency_code(self.safe_string(transaction, 'currency_id'))
        txStatus = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        feeCost = self.safe_float(transaction, 'fee')
        # a fee entry is only reported when a non-zero cost is present
        fee = {'currency': unifiedCode, 'cost': feeCost} if (feeCost is not None and feeCost != 0) else None
        return {
            'id': txId,
            'currency': unifiedCode,
            'amount': txAmount,
            'addressFrom': None,
            'address': txAddress,
            'addressTo': txAddress,
            'tagFrom': None,
            'tag': txTag,
            'tagTo': txTag,
            'status': txStatus,
            'type': txType,
            'txid': txHash,
            'timestamp': txTimestamp,
            'datetime': self.iso8601(txTimestamp),
            'fee': fee,
            'info': transaction,
        }
def parse_transaction_status(self, status):
        """Map an exchange-specific transaction status onto the unified ccxt
        status; unknown values pass through unchanged."""
        statusMap = {
            'requested': 'pending',
            'pending': 'pending',
            'confirming': 'pending',
            'confirmed': 'pending',
            'applying': 'pending',
            'done': 'ok',
            'cancelled': 'canceled',
            'cancelling': 'canceled',
        }
        return self.safe_string(statusMap, status, status)
def nonce(self):
        """Return the current timestamp in milliseconds, used as the request nonce."""
        return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, headers and body for a request.

        Three API families are handled differently:
        - 'accounts': HTTP Basic auth with apiKey:secret and a JSON body
        - 'public':   unauthenticated, query appended to the URL
        - 'private':  Bearer token previously cached by sign_in(); query in
                      the URL for GET, JSON body otherwise
        """
        url = self.urls['api'][api] + '/'
        # drop the path placeholders from the query parameters
        query = self.omit(params, self.extract_params(path))
        if api == 'accounts':
            self.check_required_credentials()
            url += self.implode_params(path, params)
            auth = self.apiKey + ':' + self.secret
            auth64 = self.string_to_base64(auth)
            headers = {
                'Authorization': 'Basic ' + self.decode(auth64),
                'Content-Type': 'application/json',
            }
            if query:
                body = self.json(query)
        else:
            url += self.version + '/'
            if api == 'public':
                url += self.implode_params(path, params)
                if query:
                    url += '?' + self.urlencode(query)
            elif api == 'private':
                now = self.milliseconds()
                self.check_required_credentials()
                # the bearer token expires; require a fresh sign_in() when stale
                expires = self.safe_integer(self.options, 'expires')
                if (expires is None) or (expires < now):
                    raise AuthenticationError(self.id + ' access token expired, call signIn() method')
                accessToken = self.safe_string(self.options, 'accessToken')
                headers = {
                    'Authorization': 'Bearer ' + accessToken,
                }
                url += self.implode_params(path, params)
                if method == 'GET':
                    if query:
                        url += '?' + self.urlencode(query)
                elif query:
                    body = self.json(query)
                    headers['Content-Type'] = 'application/json'
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def sign_in(self, params={}):
        """Obtain an OAuth2 bearer token and cache it in self.options.

        The token and its absolute expiry (epoch milliseconds) are stored
        under 'accessToken' and 'expires' for sign() to consume.
        """
        self.check_required_credentials()
        request = {
            'grant_type': 'client_credentials',  # the only supported value
        }
        response = self.accountsPostToken(self.extend(request, params))
        #
        # {
        #     access_token: '0ttDv/2hTTn3bLi8GP1gKaneiEQ6+0hOBenPrxNQt2s=',
        #     token_type: 'bearer',
        #     expires_in: 900
        # }
        #
        expiresIn = self.safe_integer(response, 'expires_in')
        accessToken = self.safe_string(response, 'access_token')
        self.options['accessToken'] = accessToken
        self.options['expires'] = self.sum(self.milliseconds(), expiresIn * 1000)
        return response
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map exchange error payloads onto unified ccxt exceptions."""
        if response is None:
            return  # fallback to default error handler
        if 'errorCode' in response:
            errorCode = self.safe_string(response, 'errorCode')
            message = self.safe_string(response, 'message')
            if errorCode is not None:
                feedback = self.id + ' ' + body
                self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
                # NOTE(review): the broad matcher is fed self.exceptions['exact'];
                # ccxt convention passes self.exceptions['broad'] here -- confirm
                # which mapping this exchange actually defines.
                self.throw_broadly_matched_exception(self.exceptions['exact'], errorCode, feedback)
                raise ExchangeError(feedback)
| [
"qqzhangjian000@163.com"
] | qqzhangjian000@163.com |
2cf6492ee3dfbfa6bd214d7e2b1ca83624f2a845 | 3b8e0e0f556e9dd1d632ac495efec58640eae570 | /espacesis/users/apps.py | 5d654f636ec7d30debca580f7ec79513816fe366 | [
"MIT"
] | permissive | espacesis/espacesis | bfcee2ca5618056ddd410be6093068e2ae529f5e | 413a268152dd80cfad4f85a04cf48305194faf15 | refs/heads/master | 2020-04-10T14:33:03.412749 | 2019-05-17T05:04:30 | 2019-05-17T05:04:30 | 161,080,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from django.apps import AppConfig
class UsersAppConfig(AppConfig):
    """Django application configuration for the project's users app."""
    # full dotted path of the app package
    name = "espacesis.users"
    # human-readable name (shown e.g. in the admin)
    verbose_name = "Users"
    def ready(self):
        # Import signal handlers so their receivers are registered once the
        # app registry is fully populated.
        # NOTE(review): the app is declared as "espacesis.users" above but this
        # imports top-level "users.signals" -- confirm the intended module path
        # (it may need to be "espacesis.users.signals"); a wrong path is
        # silently swallowed by the ImportError handler below.
        try:
            import users.signals  # noqa F401
        except ImportError:
            pass
| [
"jeanluc.kabulu@gmail.com"
] | jeanluc.kabulu@gmail.com |
f2bcde7321c6389fa62d68ffadf1fe9e9a3e2738 | 139aadef22740cac74b7698f59f711e8b5fc519f | /database_setup.py | 9abee79e367270f481e33727f847a2b838191f94 | [] | no_license | marielesf/devPython | 45adf0a627037508401b9711ae1beba16dfb2b8b | 0838c1652688ba3e1ebf75cdf10b2627847d1585 | refs/heads/master | 2021-04-29T11:01:46.412892 | 2017-01-16T17:46:46 | 2017-01-16T17:46:46 | 77,857,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative impor declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declaritive_base()
class Restaurant(Base):
__tablename__ = 'restaurant'
id = Column(Integer, primary_key=True)
name = Column (String(250), nullable=False)
class MenuItem(Base):
__tablename__ = 'menu_item'
name = Column(String(80), nullable = False)
id = Column (Integer, primary_key = True)
description = Column(String(250))
price = Column(Strign(8))
restaurant_id = Column(Integer ForeignKey('restaurant.id'))
restaurant = relationship(Restaurant)
########insert at end of file##########
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.create_all(engine)
| [
"noreply@github.com"
] | marielesf.noreply@github.com |
9988d9bd6fd7d7fe849917f6ed4e4acab908889d | d624cb4bfd4f97f363244b872746b0706625d870 | /ENV/bin/django-admin.py | 1218b5ffdebdda97b39544c685534ca915bdd9ac | [] | no_license | HelleEngstrom/python-lek | c19d72f3c25384378d458054144d70e701e53760 | 81ab49fa89c05c7b1de1a590c2010822f4281bf3 | refs/heads/master | 2020-04-15T22:41:13.768489 | 2019-01-10T15:21:09 | 2019-01-10T15:21:09 | 165,082,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!/home/helena/Code/Python-lek/ENV/bin/python
# Virtualenv copy of Django's command-line entry point: hands the process
# arguments straight to Django's management command dispatcher.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"helena.engstrom@dynabyte.se"
] | helena.engstrom@dynabyte.se |
0d1d878e2329886bb5feec51308c80e5bda54174 | 864857a25347f5119372d5313baef6f04a5294a2 | /TAlibs_getsize.py | 19ae3f4d651f6af7ef6cfc270d4836530c7fc937 | [] | no_license | holianh/Linux_DeepLearning_tools | a34f59798cee958a22d738f996e81e1ef053a380 | d3b6966adf3378319e4249520e8404cf25f637f9 | refs/heads/master | 2022-01-20T23:30:18.803634 | 2022-01-07T16:52:06 | 2022-01-07T16:52:06 | 118,061,747 | 7 | 6 | null | 2021-01-23T10:44:55 | 2018-01-19T01:48:20 | Python | UTF-8 | Python | false | false | 799 | py | import sys
def get_size(obj, seen=None):
    """Return the deep size in bytes of *obj*, following contained objects.

    ``seen`` accumulates the ids of objects already counted so shared or
    self-referential structures are only measured once.
    """
    if seen is None:
        seen = set()
    marker = id(obj)
    if marker in seen:
        return 0
    # Mark as seen *before* recursing so cyclic references terminate.
    seen.add(marker)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(get_size(value, seen) for value in obj.values())
        total += sum(get_size(key, seen) for key in obj.keys())
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        total += sum(get_size(element, seen) for element in obj)
    return total
# Usage reminder emitted whenever this helper file is executed or imported.
print("TAlibs_getsize.py")
print("var=get_size(THCH30) ")
| [
"noreply@github.com"
] | holianh.noreply@github.com |
114a64747dd2836aeedce52806d30e06ba7d8dea | 9bb6d50cd768cbcc38fc231e766c90533ce857a0 | /Data-Scripts/signal-graph-generator.py | ab30451a6e23fd82448eedbc19b07aaa3889fe43 | [
"MIT"
] | permissive | jsteersCoding/mesh-network-research | 87e6b146879f3796e427d5ef6790519672395cdb | d71e6b1a90b7c662248c14a6aea8aaa84ee30cd0 | refs/heads/master | 2020-12-12T18:43:29.059474 | 2020-01-16T00:48:48 | 2020-01-16T00:48:48 | 234,187,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | #!/usr/bin/python3
import os, os.path, sys, subprocess
import numpy as np
import glob
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import dates
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
import datetime as dt
import time
import matplotlib
# Use a bold 16pt font for all plot text.
font = {'weight' : 'bold',
        'size'   : 16}
matplotlib.rc('font', **font)
"""
signal-graph-generator.py file
Counts how many files are inside the pmacs folder
Creates subplot for the graph containging an overall comparison between the signal strength of all recorded devices throughout the period of capture from the prototype system
For each file inside the pmacs folder
	Open the file
	Read the contents of the file
	Store values of first column inside variable x (the timestamp values), then repeat for second column in y (the signal strength)
	Then plot values onto a graph with the name of the file set to the graph
Creates comparison graph showing the signal strength changes of each device together throughout the duration of the execution of the Prototype System (could also consider calculating averages for future), asigns a different colour to each found device on the graph
Run on terminal:
	user1:~/$ python signal-graph-generator.py
"""
# NOTE(review): 'files' is computed from 'mesh_devices/' but never used; the
# loop below reads 'elapsed_time_mesh_devices/mesh_devices/' instead -- confirm
# which directory is intended.
files=len(os.listdir('mesh_devices/'))
# Single wide figure holding the all-devices comparison plot.
compr = plt.figure(figsize=(200,200))
compr.set_size_inches(17, 6)
axcomp = compr.add_subplot(111)
axcomp.set_xlabel("Time in Seconds")
axcomp.set_ylabel("Received Signal Strength in dBm")
# One line colour per device file.
# NOTE(review): only 7 colours are defined -- an 8th device file would raise
# IndexError at the plot call below.
colours = ['b', 'r', 'g', 'y','m','k','c']
select_colour=0
# NOTE(review): 'global' at module level is a no-op; these two statements have
# no effect.
global starttime
global currenttime
timestamp_list = []
signalstrength_list = []
for file in sorted(glob.iglob('elapsed_time_mesh_devices/mesh_devices/*.txt')):
    with open(file,"r") as f:
        for line in f:
            # each line: "<elapsed-time> <signal-strength>" separated by a space
            currenttime=line.split(' ')[0]
            timestamp_list.append(currenttime)
            sig_str=line.split(' ')[1]
            signalstrength_list.append(sig_str)
    axcomp.plot(timestamp_list,signalstrength_list, c=colours[select_colour],label="Device {}".format(chr(ord('B')+select_colour)),linewidth=2)
    select_colour=select_colour+1
    filename=f.name
    filename += '.pdf'
    # Save a per-device snapshot of the (cumulative) comparison figure.
    plt.savefig(filename, bbox_inches='tight')
    axcomp.legend()
    timestamp_list=[]
    signalstrength_list=[]
compr.savefig("strength-comparison.pdf",bbox_inches='tight')
| [
"jsteers1github@outlook.com"
] | jsteers1github@outlook.com |
69a71e23b6ecdd0920b8d65f870bbc97bea52e25 | bc3faef4df41d04dd080a10de084d50962861b55 | /4. Sensitivity Analysis, Appendix A/PF_PO_dim.py | 7a701ce513995f178222f57196822491bc1181f2 | [
"MIT"
] | permissive | rioarya/IDN_LC_WoodyBiomass | 3f65ab79b759b8f9e3a11872dc83857a6b15acf1 | 0042fd4333212e65735f3643ecb59971d1bd9466 | refs/heads/main | 2023-07-28T04:23:39.737572 | 2021-09-14T05:23:14 | 2021-09-14T05:23:14 | 404,382,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99,986 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #t-C #source: van Beijma et al. (2018)
initAGB_min = 233-72 #t-C
initAGB_max = 233 + 72 #t-C
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26 #years
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #fraction of carbon content in biomass
c_cont_po_plasma = 0.5454 #fraction of carbon content in biomass
tf = 201 #years
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
#Each sheet holds the yearly time series for one scenario:
#S2nu/S2pl = construction-wood scenarios, Enu/Epl = energy scenarios.
#NOTE: the workbook path is hard-coded to a local Windows location.
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
#carbon routed to firewood/other energy use, per scenario
c_firewood_energy_S2nu = df2nu['Firewood_other_energy_use'].values
c_firewood_energy_S2pl = df2pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#uses the S2nu sheet's remaining-AGB series as the decomposition input
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp(t, remainAGB, a=0.082, b=2.53):
    """Carbon still remaining after ``t`` years of AGB decomposition.

    Applies the decay curve (1 - (1 - e^(-a*t))**b) to the starting stock
    ``remainAGB``. The shape parameters ``a`` and ``b`` default to the
    module-level values, so existing two-argument calls are unchanged, while
    other decay curves can now be explored without editing globals.
    """
    return (1-(1-np.exp(-a*t))**b)*remainAGB
#set zero matrix: one decomposition column per annual cohort, offset by its start year
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
    #print(i,remain_part)
    output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
#NOTE(review): '.values-1' subtracts 1 elementwise *before* len(), so the column
#count is unchanged; this was probably meant to be len(...)-1 -- verify.
subs_matrix = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
    subs_matrix[:,i] = np.diff(output_decomp[:,i])
    i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix (total yearly emissions)
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
    decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
    i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
#One inflow-driven stock model per scenario; wood products leave the in-use
#stock following a normal lifetime distribution.
from dynamic_stock_model import DynamicStockModel
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials: mean 35 years, std dev 30% of the mean
B = 35
TestDSM2nu = DynamicStockModel(t = df2nu['Year'].values, i = df2nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM2pl = DynamicStockModel(t = df2pl['Year'].values, i = df2pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr2nu, ExitFlag2nu = TestDSM2nu.dimension_check()
CheckStr2pl, ExitFlag2pl = TestDSM2pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort2nu, ExitFlag2nu = TestDSM2nu.compute_s_c_inflow_driven()
Stock_by_cohort2pl, ExitFlag2pl = TestDSM2pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S2nu, ExitFlag2nu = TestDSM2nu.compute_stock_total()
S2pl, ExitFlag2pl = TestDSM2pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C2nu, ExitFlag2nu = TestDSM2nu.compute_o_c_from_s_c()
O_C2pl, ExitFlag2pl = TestDSM2pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O2nu, ExitFlag2nu = TestDSM2nu.compute_outflow_total()
O2pl, ExitFlag2pl = TestDSM2pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS2nu, ExitFlag2nu = TestDSM2nu.compute_stock_change()
DS2pl, ExitFlag2pl = TestDSM2pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal2nu, ExitFlag2nu = TestDSM2nu.check_stock_balance()
Bal2pl, ExitFlag2pl = TestDSM2pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM2nu.o)
print(TestDSM2pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#%%
#Step (5): Biomass growth
#Model I Oil Palm Biomass Growth (Khasanah et al. (2015))
#plantation age axis for one 26-year rotation
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A, c_cont_po_nucleus=0.5448, a_nucleus=2.8167, b_nucleus=6.8648):
    """Standing carbon stock (tCO2-eq/ha) of a nucleus-estate oil palm of age ``A`` years.

    Converts the linear biomass yield curve (a_nucleus*A + b_nucleus, in t/ha)
    to CO2-equivalents via the carbon-content fraction and the 44/12 C-to-CO2
    ratio. The keyword defaults mirror the module-level constants, so existing
    single-argument calls behave exactly as before.
    """
    return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
#evaluate the nucleus growth curve over one rotation
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A, c_cont_po_plasma=0.5454, a_plasma=2.5449, b_plasma=5.0007):
    """Standing carbon stock (tCO2-eq/ha) of a plasma-estate oil palm of age ``A`` years.

    Same conversion as Y_nucleus() but with the plasma yield-curve parameters.
    The keyword defaults mirror the module-level constants, so existing
    single-argument calls behave exactly as before.
    """
    return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
    y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
    for item in sublist:
        flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma: same 8-rotation tiling as the nucleus estate
y_plasma = []
for i in counter:
    y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
    for item in sublist:
        flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting the AGB of both estates over the full horizon
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_nucleus' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nucleus' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_plasma' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
#NOTE: the comprehension variable 't' below does not leak in Python 3, so the
#module-level 't' range defined above is unaffected
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S2nu = df2nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S2pl = df2pl['PH_Emissions_HWP'].values
#NOTE(review): both Enu and Epl below read from df3pl (the Epl sheet loaded in
#Step 4); the Enu variables look like they should come from df3nu/dfEnu -- the
#same pattern repeats for the palm-oil block further down. Verify before use.
PH_Emissions_HWP_Enu = df3pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = df3pl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S2nu = df2nu['PH_Emissions_PO'].values
PH_Emissions_PO_S2pl = df2pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = df3pl['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = df3pl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life (years) of landfilled material
k = (np.log(2))/hl
#S2nu -- the same pipeline as Step (3), applied to the landfill CH4 series
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2nu(t,remainAGB_CH4_S2nu):
    #first-order decay: fraction remaining after t years at rate k
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2nu
#set zero matrix
output_decomp_CH4_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2nu in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S2nu[i:,i] = decomp_CH4_S2nu(t[:len(t)-i],remain_part_CH4_S2nu)
print(output_decomp_CH4_S2nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S2nu[:,i] = np.diff(output_decomp_CH4_S2nu[:,i])
    i = i + 1
print(subs_matrix_CH4_S2nu[:,:4])
print(len(subs_matrix_CH4_S2nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2nu = subs_matrix_CH4_S2nu.clip(max=0)
print(subs_matrix_CH4_S2nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2nu = abs(subs_matrix_CH4_S2nu)
print(subs_matrix_CH4_S2nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2nu)
subs_matrix_CH4_S2nu = np.vstack((zero_matrix_CH4_S2nu, subs_matrix_CH4_S2nu))
print(subs_matrix_CH4_S2nu[:,:4])
#sum every column of the subs_matrix into one vector matrix (total yearly CH4 release)
matrix_tot_CH4_S2nu = (tf,1)
decomp_tot_CH4_S2nu = np.zeros(matrix_tot_CH4_S2nu)
i = 0
while i < tf:
    decomp_tot_CH4_S2nu[:,0] = decomp_tot_CH4_S2nu[:,0] + subs_matrix_CH4_S2nu[:,i]
    i = i + 1
print(decomp_tot_CH4_S2nu[:,0])
#S2pl -- repeats the S2nu landfill CH4 pipeline for the S2pl sheet
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2pl(t,remainAGB_CH4_S2pl):
    #first-order decay: fraction remaining after t years at rate k
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2pl
#set zero matrix
output_decomp_CH4_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2pl in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_S2pl[i:,i] = decomp_CH4_S2pl(t[:len(t)-i],remain_part_CH4_S2pl)
print(output_decomp_CH4_S2pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_S2pl[:,i] = np.diff(output_decomp_CH4_S2pl[:,i])
    i = i + 1
print(subs_matrix_CH4_S2pl[:,:4])
print(len(subs_matrix_CH4_S2pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2pl = subs_matrix_CH4_S2pl.clip(max=0)
print(subs_matrix_CH4_S2pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2pl = abs(subs_matrix_CH4_S2pl)
print(subs_matrix_CH4_S2pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2pl)
subs_matrix_CH4_S2pl = np.vstack((zero_matrix_CH4_S2pl, subs_matrix_CH4_S2pl))
print(subs_matrix_CH4_S2pl[:,:4])
#sum every column of the subs_matrix into one vector matrix (total yearly CH4 release)
matrix_tot_CH4_S2pl = (tf,1)
decomp_tot_CH4_S2pl = np.zeros(matrix_tot_CH4_S2pl)
i = 0
while i < tf:
    decomp_tot_CH4_S2pl[:,0] = decomp_tot_CH4_S2pl[:,0] + subs_matrix_CH4_S2pl[:,i]
    i = i + 1
print(decomp_tot_CH4_S2pl[:,0])
#Enu -- repeats the landfill CH4 pipeline for the Enu sheet
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
    #first-order decay: fraction remaining after t years at rate k
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
    i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#make the results as absolute values
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix (total yearly CH4 release)
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
    decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
    i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl -- repeats the landfill CH4 pipeline for the Epl sheet
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
    #first-order decay: fraction remaining after t years at rate k
    return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
    #print(i,remain_part)
    output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
    subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
    i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#make the results as absolute values
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix (total yearly CH4 release)
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
    decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
    i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
# Overlay the yearly landfill CH4 decomposition totals for all four scenarios.
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S2nu,label='CH4_S2nu')
plt.plot(t,decomp_tot_CH4_S2pl,label='CH4_S2pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
# Legend outside the axes so it does not cover the curves.
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
# Same first-order decay treatment as the CH4 step, with a 20-year half-life.
hl = 20 #half-live
k = (np.log(2))/hl

#S2nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)

def decomp_CO2_S2nu(t,remainAGB_CO2_S2nu):
    # Remaining mass after first-order decay; 1-(1-exp(-k*t)) is kept verbatim
    # to preserve the original floating-point results exactly.
    return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2nu

#set zero matrix
output_decomp_CO2_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2nu in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_S2nu[i:,i] = decomp_CO2_S2nu(t[:len(t)-i],remain_part_CO2_S2nu)
print(output_decomp_CO2_S2nu[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# BUG FIX: the original wrote len(df[...].values-1), which subtracts 1 from every
# element (length unchanged) rather than shortening the array; behavior-identical.
subs_matrix_CO2_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
    subs_matrix_CO2_S2nu[:,i] = np.diff(output_decomp_CO2_S2nu[:,i])
    i = i + 1
print(subs_matrix_CO2_S2nu[:,:4])
print(len(subs_matrix_CO2_S2nu))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2nu = subs_matrix_CO2_S2nu.clip(max=0)
print(subs_matrix_CO2_S2nu[:,:4])

#make the results as absolute values
subs_matrix_CO2_S2nu = abs(subs_matrix_CO2_S2nu)
print(subs_matrix_CO2_S2nu[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2nu)
subs_matrix_CO2_S2nu = np.vstack((zero_matrix_CO2_S2nu, subs_matrix_CO2_S2nu))
print(subs_matrix_CO2_S2nu[:,:4])

#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2nu = (tf,1)
decomp_tot_CO2_S2nu = np.zeros(matrix_tot_CO2_S2nu)
i = 0
while i < tf:
    decomp_tot_CO2_S2nu[:,0] = decomp_tot_CO2_S2nu[:,0] + subs_matrix_CO2_S2nu[:,i]
    i = i + 1
print(decomp_tot_CO2_S2nu[:,0])
#S2pl
# Landfill CO2 decomposition for the M_plasma (S2pl) scenario; mirrors the S2nu
# block above and reuses the CO2 decay constant k.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)

def decomp_CO2_S2pl(t,remainAGB_CO2_S2pl):
    # Remaining mass after first-order decay; expression kept verbatim to
    # preserve the original floating-point results exactly.
    return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2pl

#set zero matrix
output_decomp_CO2_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2pl in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_S2pl[i:,i] = decomp_CO2_S2pl(t[:len(t)-i],remain_part_CO2_S2pl)
print(output_decomp_CO2_S2pl[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# BUG FIX: dropped the misplaced '-1' that was inside len(values-1); the length
# is the same either way, so this is behavior-identical.
subs_matrix_CO2_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
    subs_matrix_CO2_S2pl[:,i] = np.diff(output_decomp_CO2_S2pl[:,i])
    i = i + 1
print(subs_matrix_CO2_S2pl[:,:4])
print(len(subs_matrix_CO2_S2pl))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2pl = subs_matrix_CO2_S2pl.clip(max=0)
print(subs_matrix_CO2_S2pl[:,:4])

#make the results as absolute values
subs_matrix_CO2_S2pl = abs(subs_matrix_CO2_S2pl)
print(subs_matrix_CO2_S2pl[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2pl)
subs_matrix_CO2_S2pl = np.vstack((zero_matrix_CO2_S2pl, subs_matrix_CO2_S2pl))
print(subs_matrix_CO2_S2pl[:,:4])

#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2pl = (tf,1)
decomp_tot_CO2_S2pl = np.zeros(matrix_tot_CO2_S2pl)
i = 0
while i < tf:
    decomp_tot_CO2_S2pl[:,0] = decomp_tot_CO2_S2pl[:,0] + subs_matrix_CO2_S2pl[:,i]
    i = i + 1
print(decomp_tot_CO2_S2pl[:,0])
#Enu
# Landfill CO2 decomposition for the E_nucleus scenario; mirrors the S2nu block.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)

def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
    # Remaining mass after first-order decay; expression kept verbatim to
    # preserve the original floating-point results exactly.
    return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu

#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# BUG FIX: dropped the misplaced '-1' that was inside len(values-1); the length
# is the same either way, so this is behavior-identical.
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
    subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
    i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])

#make the results as absolute values
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])

#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
    decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
    i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
# Landfill CO2 decomposition for the E_plasma scenario; mirrors the S2nu block.
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)

def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
    # Remaining mass after first-order decay; expression kept verbatim to
    # preserve the original floating-point results exactly.
    return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl

#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
    #print(i,remain_part)
    output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])

#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
# BUG FIX: dropped the misplaced '-1' that was inside len(values-1); the length
# is the same either way, so this is behavior-identical.
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values)))
i = 0
while i < tf:
    subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
    i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))

#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])

#make the results as absolute values
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])

#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])

#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
i = 0
while i < tf:
    decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
    i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting
# Overlay the yearly landfill CO2 decomposition totals for all four scenarios.
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S2nu,label='CO2_S2nu')
plt.plot(t,decomp_tot_CO2_S2pl,label='CO2_S2pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
# Legend outside the axes so it does not cover the curves.
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
# Element-wise totals per year via zip(*lists); assumes every component series
# has the same length (tf) — TODO confirm against the upstream steps.
Emissions_PF_PO_S2nu = [c_firewood_energy_S2nu, decomp_emissions[:,0], TestDSM2nu.o, PH_Emissions_PO_S2nu, PH_Emissions_HWP_S2nu, decomp_tot_CO2_S2nu[:,0]]
Emissions_PF_PO_S2pl = [c_firewood_energy_S2pl, decomp_emissions[:,0], TestDSM2pl.o, PH_Emissions_PO_S2pl, PH_Emissions_HWP_S2pl, decomp_tot_CO2_S2pl[:,0]]
# The E scenarios additionally include a pellets stream.
Emissions_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o, PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu, decomp_tot_CO2_Enu[:,0]]
Emissions_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o, PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl, decomp_tot_CO2_Epl[:,0]]
Emissions_PF_PO_S2nu = [sum(x) for x in zip(*Emissions_PF_PO_S2nu)]
Emissions_PF_PO_S2pl = [sum(x) for x in zip(*Emissions_PF_PO_S2pl)]
Emissions_PF_PO_Enu = [sum(x) for x in zip(*Emissions_PF_PO_Enu)]
Emissions_PF_PO_Epl = [sum(x) for x in zip(*Emissions_PF_PO_Epl)]
# CH4 is kept separate because it gets its own dynamic characterization factor.
#CH4_S2nu
Emissions_CH4_PF_PO_S2nu = decomp_tot_CH4_S2nu[:,0]
#CH4_S2pl
Emissions_CH4_PF_PO_S2pl = decomp_tot_CH4_S2pl[:,0]
#CH4_Enu
Emissions_CH4_PF_PO_Enu = decomp_tot_CH4_Enu[:,0]
#CH4_Epl
Emissions_CH4_PF_PO_Epl = decomp_tot_CH4_Epl[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
    year.append(x)
print (year)

#print CH4 emission column
# Placeholder all-zero series of length tf (appears unused below — NOTE(review):
# Emissions_CH4 is built but the sheets use the per-scenario CH4 columns instead).
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)

#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)

#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)

# Assemble the per-sheet columns: CO2, CH4, sequestration, and the 1-kg pulse reference.
Col1 = year
Col2_S2nu = Emissions_PF_PO_S2nu
Col2_S2pl = Emissions_PF_PO_S2pl
Col2_Enu = Emissions_PF_PO_Enu
Col2_Epl = Emissions_PF_PO_Epl
Col3_S2nu = Emissions_CH4_PF_PO_S2nu
Col3_S2pl = Emissions_CH4_PF_PO_S2pl
Col3_Enu = Emissions_CH4_PF_PO_Enu
Col3_Epl = Emissions_CH4_PF_PO_Epl
Col4 = flat_list_nucleus
Col5 = Emission_ref
Col6 = flat_list_plasma

#S2
df2_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2nu,'kg_CH4':Col3_S2nu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df2_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2pl,'kg_CH4':Col3_S2pl,'kg_CO2_seq':Col6,'emission_ref':Col5})

#E
df3_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Enu,'kg_CH4':Col3_Enu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df3_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Epl,'kg_CH4':Col3_Epl,'kg_CO2_seq':Col6,'emission_ref':Col5})

# One sheet per scenario; this workbook is re-read in Steps (13)-(14).
writer = pd.ExcelWriter('emissions_seq_PF_PO_dim.xlsx', engine = 'xlsxwriter')
df2_nu.to_excel(writer, sheet_name = 'S2_nucleus', header=True, index=False)
df2_pl.to_excel(writer, sheet_name = 'S2_plasma', header=True, index=False)
df3_nu.to_excel(writer, sheet_name = 'E_nucleus', header=True, index=False)
df3_pl.to_excel(writer, sheet_name = 'E_plasma', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA - wood-based scenarios
# Step (10): Set General Parameters for Dynamic LCA calculation
# Radiative-efficiency and lifetime constants for the dynamic GWP calculation.
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)

## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
    # Fraction of a CO2 pulse remaining airborne after t years: constant term
    # plus three exponential decay pools (a0Bern, aBern/TauCO2 from Step 10).
    return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])

output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)

## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
    # Single-exponential decay with the CH4 atmospheric lifetime TauCH4.
    return np.exp(-t/TauCH4)

output_CH4 = np.array([C_CH4(ti) for ti in t])

# Visual check of both decay curves over the time horizon.
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()

output_CH4.size
#%%
#determine the C(t) for CO2
# Integrate the decay curve over each 1-year interval [i-1, i] (scipy quad
# returns (value, abserr); keep the value). Note i=0 integrates [-1, 0].
s = []
t = np.arange(0,tf,1)
for i in t:
    s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)

#%%
#determine the C(t) for CH4
s = []
for i in t:
    s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]

#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
# DCF per year = radiative efficiency (a) times the integrated airborne fraction.
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##wood-based
# Re-read the workbook written in Step (9); one sheet per scenario.
#read S2_nucleus
df = pd.read_excel('emissions_seq_PF_PO_dim.xlsx', 'S2_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_S2nu = df['kg_CO2'].tolist()
emission_CH4_S2nu = df['kg_CH4'].tolist()
emission_CO2_seq_S2nu = df['kg_CO2_seq'].tolist()
# The 1-kg pulse reference column is identical across sheets; read once here.
emission_CO2_ref = df['emission_ref'].tolist()

#read S2_plasma
df = pd.read_excel('emissions_seq_PF_PO_dim.xlsx', 'S2_plasma')
emission_CO2_S2pl = df['kg_CO2'].tolist()
emission_CH4_S2pl = df['kg_CH4'].tolist()
emission_CO2_seq_S2pl = df['kg_CO2_seq'].tolist()

#read E_nucleus
df = pd.read_excel('emissions_seq_PF_PO_dim.xlsx', 'E_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_Enu = df['kg_CO2'].tolist()
emission_CH4_Enu = df['kg_CH4'].tolist()
emission_CO2_seq_Enu = df['kg_CO2_seq'].tolist()

#read E_plasma
df = pd.read_excel('emissions_seq_PF_PO_dim.xlsx', 'E_plasma')
emission_CO2_Epl = df['kg_CO2'].tolist()
emission_CH4_Epl = df['kg_CH4'].tolist()
emission_CO2_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
# Counterfactual (non-renewable) workbook; assumed to exist alongside this script.
#read S2_nucleus
df = pd.read_excel('NonRW_PF_PO_dim.xlsx', 'PF_PO_S2nu') # can also index sheet by name or fetch all sheets
emission_NonRW_S2nu = df['NonRW_emissions'].tolist()
emission_Diesel_S2nu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S2nu = df['kg_CO2_seq'].tolist()
# NOTE(review): this overwrites emission_CO2_ref from Step (13); presumably the
# reference column is identical in both workbooks — verify.
emission_CO2_ref = df['emission_ref'].tolist()

#read S2_plasma
df = pd.read_excel('NonRW_PF_PO_dim.xlsx', 'PF_PO_S2pl')
emission_NonRW_S2pl = df['NonRW_emissions'].tolist()
emission_Diesel_S2pl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S2pl = df['kg_CO2_seq'].tolist()

#read E_nucleus
df = pd.read_excel('NonRW_PF_PO_dim.xlsx', 'PF_PO_Enu') # can also index sheet by name or fetch all sheets
emission_NonRW_Enu = df['NonRW_emissions'].tolist()
emission_Diesel_Enu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Enu = df['kg_CO2_seq'].tolist()

#read E_plasma
df = pd.read_excel('NonRW_PF_PO_dim.xlsx', 'PF_PO_Epl')
emission_NonRW_Epl = df['NonRW_emissions'].tolist()
emission_Diesel_Epl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4
# Build an upper-triangular (tf-1 x tf-1) matrix where column t holds
# DCF_inst[t-i] for emission years i = 0..t: row index = emission year,
# column index = evaluation year. Note this rebinds t to an int.
#DCF(t-i) CO2
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)
for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
        i = i + 1
print(DCF_CO2_ti)

#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape

#DCF(t-i) CH4
# Same triangular construction using the CH4 factors.
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)
for t in range(0,tf-1):
    i = -1
    while i < t:
        DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
        i = i + 1
print(DCF_CH4_ti)

#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
#%%
#Step (16): Calculate instantaneous global warming impact (GWI)
##wood-based
# For each year t, GWI is the emission series dotted with the shifted DCF column:
# col 0 = CO2, col 1 = CH4, col 2 = sequestration (negative values offset the total).
#S2_nucleus
t = np.arange(0,tf-1,1)

matrix_GWI_S2nu = (tf-1,3)
GWI_inst_S2nu = np.zeros(matrix_GWI_S2nu)

for t in range(0,tf-1):
    GWI_inst_S2nu[t,0] = np.sum(np.multiply(emission_CO2_S2nu,DCF_CO2_ti[:,t]))
    GWI_inst_S2nu[t,1] = np.sum(np.multiply(emission_CH4_S2nu,DCF_CH4_ti[:,t]))
    GWI_inst_S2nu[t,2] = np.sum(np.multiply(emission_CO2_seq_S2nu,DCF_CO2_ti[:,t]))

# Total instantaneous GWI = sum of the three gas/sink contributions.
matrix_GWI_tot_S2nu = (tf-1,1)
GWI_inst_tot_S2nu = np.zeros(matrix_GWI_tot_S2nu)
GWI_inst_tot_S2nu[:,0] = np.array(GWI_inst_S2nu[:,0] + GWI_inst_S2nu[:,1] + GWI_inst_S2nu[:,2])
print(GWI_inst_tot_S2nu[:,0])

#S2_plasma
t = np.arange(0,tf-1,1)

matrix_GWI_S2pl = (tf-1,3)
GWI_inst_S2pl = np.zeros(matrix_GWI_S2pl)

for t in range(0,tf-1):
    GWI_inst_S2pl[t,0] = np.sum(np.multiply(emission_CO2_S2pl,DCF_CO2_ti[:,t]))
    GWI_inst_S2pl[t,1] = np.sum(np.multiply(emission_CH4_S2pl,DCF_CH4_ti[:,t]))
    GWI_inst_S2pl[t,2] = np.sum(np.multiply(emission_CO2_seq_S2pl,DCF_CO2_ti[:,t]))

matrix_GWI_tot_S2pl = (tf-1,1)
GWI_inst_tot_S2pl = np.zeros(matrix_GWI_tot_S2pl)
GWI_inst_tot_S2pl[:,0] = np.array(GWI_inst_S2pl[:,0] + GWI_inst_S2pl[:,1] + GWI_inst_S2pl[:,2])
print(GWI_inst_tot_S2pl[:,0])

#E_nucleus
t = np.arange(0,tf-1,1)

matrix_GWI_Enu = (tf-1,3)
GWI_inst_Enu = np.zeros(matrix_GWI_Enu)

for t in range(0,tf-1):
    GWI_inst_Enu[t,0] = np.sum(np.multiply(emission_CO2_Enu,DCF_CO2_ti[:,t]))
    GWI_inst_Enu[t,1] = np.sum(np.multiply(emission_CH4_Enu,DCF_CH4_ti[:,t]))
    GWI_inst_Enu[t,2] = np.sum(np.multiply(emission_CO2_seq_Enu,DCF_CO2_ti[:,t]))

matrix_GWI_tot_Enu = (tf-1,1)
GWI_inst_tot_Enu = np.zeros(matrix_GWI_tot_Enu)
GWI_inst_tot_Enu[:,0] = np.array(GWI_inst_Enu[:,0] + GWI_inst_Enu[:,1] + GWI_inst_Enu[:,2])
print(GWI_inst_tot_Enu[:,0])

#E_plasma
t = np.arange(0,tf-1,1)

matrix_GWI_Epl = (tf-1,3)
GWI_inst_Epl = np.zeros(matrix_GWI_Epl)

for t in range(0,tf-1):
    GWI_inst_Epl[t,0] = np.sum(np.multiply(emission_CO2_Epl,DCF_CO2_ti[:,t]))
    GWI_inst_Epl[t,1] = np.sum(np.multiply(emission_CH4_Epl,DCF_CH4_ti[:,t]))
    GWI_inst_Epl[t,2] = np.sum(np.multiply(emission_CO2_seq_Epl,DCF_CO2_ti[:,t]))

matrix_GWI_tot_Epl = (tf-1,1)
GWI_inst_tot_Epl = np.zeros(matrix_GWI_tot_Epl)
GWI_inst_tot_Epl[:,0] = np.array(GWI_inst_Epl[:,0] + GWI_inst_Epl[:,1] + GWI_inst_Epl[:,2])
print(GWI_inst_tot_Epl[:,0])
## NonRW
#GWI_inst for all gases
# Counterfactual scenarios: all three streams (non-renewable, diesel,
# sequestration) are characterized with the CO2 DCF — no CH4 stream here.
#S2_nucleus
t = np.arange(0,tf-1,1)

matrix_GWI_NonRW_S2nu = (tf-1,3)
GWI_inst_NonRW_S2nu = np.zeros(matrix_GWI_NonRW_S2nu)

for t in range(0,tf-1):
    GWI_inst_NonRW_S2nu[t,0] = np.sum(np.multiply(emission_NonRW_S2nu,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S2nu[t,1] = np.sum(np.multiply(emission_Diesel_S2nu,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S2nu[t,2] = np.sum(np.multiply(emission_NonRW_seq_S2nu,DCF_CO2_ti[:,t]))

matrix_GWI_tot_NonRW_S2nu = (tf-1,1)
GWI_inst_tot_NonRW_S2nu = np.zeros(matrix_GWI_tot_NonRW_S2nu)
GWI_inst_tot_NonRW_S2nu[:,0] = np.array(GWI_inst_NonRW_S2nu[:,0] + GWI_inst_NonRW_S2nu[:,1] + GWI_inst_NonRW_S2nu[:,2])
print(GWI_inst_tot_NonRW_S2nu[:,0])

#S2_plasma
t = np.arange(0,tf-1,1)

matrix_GWI_NonRW_S2pl = (tf-1,3)
GWI_inst_NonRW_S2pl = np.zeros(matrix_GWI_NonRW_S2pl)

for t in range(0,tf-1):
    GWI_inst_NonRW_S2pl[t,0] = np.sum(np.multiply(emission_NonRW_S2pl,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S2pl[t,1] = np.sum(np.multiply(emission_Diesel_S2pl,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S2pl[t,2] = np.sum(np.multiply(emission_NonRW_seq_S2pl,DCF_CO2_ti[:,t]))

matrix_GWI_tot_NonRW_S2pl = (tf-1,1)
GWI_inst_tot_NonRW_S2pl = np.zeros(matrix_GWI_tot_NonRW_S2pl)
GWI_inst_tot_NonRW_S2pl[:,0] = np.array(GWI_inst_NonRW_S2pl[:,0] + GWI_inst_NonRW_S2pl[:,1] + GWI_inst_NonRW_S2pl[:,2])
print(GWI_inst_tot_NonRW_S2pl[:,0])

#E_nucleus
t = np.arange(0,tf-1,1)

matrix_GWI_NonRW_Enu = (tf-1,3)
GWI_inst_NonRW_Enu = np.zeros(matrix_GWI_NonRW_Enu)

for t in range(0,tf-1):
    GWI_inst_NonRW_Enu[t,0] = np.sum(np.multiply(emission_NonRW_Enu,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_Enu[t,1] = np.sum(np.multiply(emission_Diesel_Enu,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_Enu[t,2] = np.sum(np.multiply(emission_NonRW_seq_Enu,DCF_CO2_ti[:,t]))

matrix_GWI_tot_NonRW_Enu = (tf-1,1)
GWI_inst_tot_NonRW_Enu = np.zeros(matrix_GWI_tot_NonRW_Enu)
GWI_inst_tot_NonRW_Enu[:,0] = np.array(GWI_inst_NonRW_Enu[:,0] + GWI_inst_NonRW_Enu[:,1] + GWI_inst_NonRW_Enu[:,2])
print(GWI_inst_tot_NonRW_Enu[:,0])

#E_plasma
t = np.arange(0,tf-1,1)

matrix_GWI_NonRW_Epl = (tf-1,3)
GWI_inst_NonRW_Epl = np.zeros(matrix_GWI_NonRW_Epl)

for t in range(0,tf-1):
    GWI_inst_NonRW_Epl[t,0] = np.sum(np.multiply(emission_NonRW_Epl,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_Epl[t,1] = np.sum(np.multiply(emission_Diesel_Epl,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_Epl[t,2] = np.sum(np.multiply(emission_NonRW_seq_Epl,DCF_CO2_ti[:,t]))

matrix_GWI_tot_NonRW_Epl = (tf-1,1)
GWI_inst_tot_NonRW_Epl = np.zeros(matrix_GWI_tot_NonRW_Epl)
GWI_inst_tot_NonRW_Epl[:,0] = np.array(GWI_inst_NonRW_Epl[:,0] + GWI_inst_NonRW_Epl[:,1] + GWI_inst_NonRW_Epl[:,2])
print(GWI_inst_tot_NonRW_Epl[:,0])

# Restore t as an array (the loops above rebound it to an int).
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
    """Return a list of n zeros, used to draw the y=0 reference line."""
    return [0 for _ in range(n)]
#convert to flat list
# Flatten the (tf-1,1) column vectors to 1-D arrays for plotting.
GWI_inst_tot_NonRW_S2nu = np.array([item for sublist in GWI_inst_tot_NonRW_S2nu for item in sublist])
GWI_inst_tot_NonRW_S2pl = np.array([item for sublist in GWI_inst_tot_NonRW_S2pl for item in sublist])
GWI_inst_tot_NonRW_Enu = np.array([item for sublist in GWI_inst_tot_NonRW_Enu for item in sublist])
GWI_inst_tot_NonRW_Epl = np.array([item for sublist in GWI_inst_tot_NonRW_Epl for item in sublist])

GWI_inst_tot_S2nu = np.array([item for sublist in GWI_inst_tot_S2nu for item in sublist])
GWI_inst_tot_S2pl = np.array([item for sublist in GWI_inst_tot_S2pl for item in sublist])
GWI_inst_tot_Enu = np.array([item for sublist in GWI_inst_tot_Enu for item in sublist])
GWI_inst_tot_Epl = np.array([item for sublist in GWI_inst_tot_Epl for item in sublist])

# Dashed lines = non-renewable counterfactuals; solid lines = wood-based scenarios.
plt.plot(t, GWI_inst_tot_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')

plt.plot(t, GWI_inst_tot_S2nu, color='lightcoral', label='M_nucleus')
plt.plot(t, GWI_inst_tot_S2pl, color='deeppink', label='M_plasma')
plt.plot(t, GWI_inst_tot_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_inst_tot_Epl, color='deepskyblue', label='E_plasma')

plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

#plt.fill_between(t, GWI_inst_tot_NonRW_Enu, GWI_inst_tot_NonRW_S2pl, color='lightcoral', alpha=0.3)

plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
plt.ylim(-0.5e-9,1.4e-9)
plt.title('Instantaneous GWI, PF_PO')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-13}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')
# BUG FIX: use a raw string — the original relied on '\W', '\D' etc. being
# invalid escape sequences that Python happens to keep literally (with a
# DeprecationWarning/SyntaxWarning). The path bytes are unchanged.
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_PF_PO_dim', dpi=300)
plt.show()
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
##wood-based
# Running sum of the instantaneous GWI over the time horizon.
GWI_cum_S2nu = np.cumsum(GWI_inst_tot_S2nu)
GWI_cum_S2pl = np.cumsum(GWI_inst_tot_S2pl)
GWI_cum_Enu = np.cumsum(GWI_inst_tot_Enu)
GWI_cum_Epl = np.cumsum(GWI_inst_tot_Epl)

##NonRW
GWI_cum_NonRW_S2nu = np.cumsum(GWI_inst_tot_NonRW_S2nu)
GWI_cum_NonRW_S2pl = np.cumsum(GWI_inst_tot_NonRW_S2pl)
GWI_cum_NonRW_Enu = np.cumsum(GWI_inst_tot_NonRW_Enu)
GWI_cum_NonRW_Epl = np.cumsum(GWI_inst_tot_NonRW_Epl)

plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-11}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
plt.ylim(-0.3e-7,2e-7)
plt.title('Cumulative GWI, PF_PO')

# Dashed = non-renewable counterfactuals; solid = wood-based scenarios.
plt.plot(t, GWI_cum_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
plt.plot(t, GWI_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')

plt.plot(t, GWI_cum_S2nu, color='lightcoral', label='M_nucleus')
plt.plot(t, GWI_cum_S2pl, color='deeppink', label='M_plasma')
plt.plot(t, GWI_cum_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_cum_Epl, color='deepskyblue', label='E_plasma')

plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

#plt.fill_between(t, GWI_cum_NonRW_Enu, GWI_cum_NonRW_S2pl, color='lightcoral', alpha=0.3)

plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
# BUG FIX: raw string for the Windows path — the original depended on invalid
# escape sequences being kept literally; path bytes are unchanged.
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_PF_PO_dim', dpi=300)
plt.show()
#%%
#Step (18): Determine the Instantenous and Cumulative GWI for the emission reference (1 kg CO2 emission at time zero) before performing dynamic GWP calculation
#determine the GWI inst for the emission reference (1 kg CO2 emission at time zero)
t = np.arange(0,tf-1,1)

matrix_GWI_ref = (tf-1,1)
# BUG FIX: the original allocated np.zeros(matrix_GWI_S2nu) — a (tf-1,3) array
# borrowed from another scenario block — instead of the (tf-1,1) shape defined
# just above. Only column 0 is ever read or written, so results are unchanged.
GWI_inst_ref = np.zeros(matrix_GWI_ref)

# GWI of the 1-kg CO2 pulse at year 0 (emission_CO2_ref), characterized with
# the shifted CO2 DCFs; used as the denominator of the dynamic GWP.
for t in range(0,tf-1):
    GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))

#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)

#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)

GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)

plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)

len(GWI_cum_ref)
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
#convert the GWPdyn to tCO2 (divided by 1000)
# Dynamic GWP = cumulative scenario GWI normalized by the cumulative GWI of the
# 1-kg CO2 reference pulse; /1000 converts kg CO2-eq to t CO2-eq.
##wood-based
GWP_dyn_cum_S2nu = [x/(y*1000) for x,y in zip(GWI_cum_S2nu, GWI_cum_ref)]
GWP_dyn_cum_S2pl = [x/(y*1000) for x,y in zip(GWI_cum_S2pl, GWI_cum_ref)]
GWP_dyn_cum_Enu = [x/(y*1000) for x,y in zip(GWI_cum_Enu, GWI_cum_ref)]
GWP_dyn_cum_Epl = [x/(y*1000) for x,y in zip(GWI_cum_Epl, GWI_cum_ref)]

##NonRW
GWP_dyn_cum_NonRW_S2nu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S2nu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S2pl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S2pl, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Enu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Enu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Epl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Epl, GWI_cum_ref)]

fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)

# Dashed = non-renewable counterfactuals; solid = wood-based scenarios.
ax.plot(t, GWP_dyn_cum_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')

ax.plot(t, GWP_dyn_cum_S2nu, color='lightcoral', label='M_nucleus')
ax.plot(t, GWP_dyn_cum_S2pl, color='deeppink', label='M_plasma')
ax.plot(t, GWP_dyn_cum_Enu, color='royalblue', label='E_nucleus')
ax.plot(t, GWP_dyn_cum_Epl, color='deepskyblue', label='E_plasma')

ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

#plt.fill_between(t, GWP_dyn_cum_NonRW_Enu, GWP_dyn_cum_NonRW_S2pl, color='lightcoral', alpha=0.3)

plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_xlim(0,200)
ax.set_ylim(-250,1400)
ax.set_title('Dynamic GWP, PF_PO')
plt.draw()
# BUG FIX: raw string for the Windows path — the original depended on invalid
# escape sequences being kept literally; path bytes are unchanged.
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_PF_PO_dim', dpi=300)
#%%
#Step (20): Exporting the data behind result graphs to Excel
# Three sheets: instantaneous GWI, cumulative GWI, and dynamic GWP, each with
# the four wood-based and four non-renewable scenario columns.
year = []
for x in range (0, 201):
    year.append(x)

### Create Column
Col1 = year

##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_3 = GWI_inst_tot_S2nu
Col_GI_4 = GWI_inst_tot_S2pl
Col_GI_5 = GWI_inst_tot_Enu
Col_GI_6 = GWI_inst_tot_Epl

#print(Col_GI_1)
#print(np.shape(Col_GI_1))

#GWI_inst from counter use scenarios
Col_GI_9 = GWI_inst_tot_NonRW_S2nu
Col_GI_10 = GWI_inst_tot_NonRW_S2pl
Col_GI_11 = GWI_inst_tot_NonRW_Enu
Col_GI_12 = GWI_inst_tot_NonRW_Epl

#print(Col_GI_7)
#print(np.shape(Col_GI_7))

#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_3 = GWI_cum_S2nu
Col_GC_4 = GWI_cum_S2pl
Col_GC_5 = GWI_cum_Enu
Col_GC_6 = GWI_cum_Epl

#GWI_cumulative from counter use scenarios
Col_GC_9 = GWI_cum_NonRW_S2nu
Col_GC_10 = GWI_cum_NonRW_S2pl
Col_GC_11 = GWI_cum_NonRW_Enu
Col_GC_12 = GWI_cum_NonRW_Epl

#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_3 = GWP_dyn_cum_S2nu
Col_GWP_4 = GWP_dyn_cum_S2pl
Col_GWP_5 = GWP_dyn_cum_Enu
Col_GWP_6 = GWP_dyn_cum_Epl

#GWPdyn from counter use scenarios
Col_GWP_9 = GWP_dyn_cum_NonRW_S2nu
Col_GWP_10 = GWP_dyn_cum_NonRW_S2pl
Col_GWP_11 = GWP_dyn_cum_NonRW_Enu
Col_GWP_12 = GWP_dyn_cum_NonRW_Epl

#Create colum results
dfM_GI = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (W/m2)':Col_GI_3, 'M_plasma (W/m2)':Col_GI_4,
                                 'E_nucleus (W/m2)':Col_GI_5, 'E_plasma (W/m2)':Col_GI_6,
                                 'NR_M_nucleus (W/m2)':Col_GI_9, 'NR_M_plasma (W/m2)':Col_GI_10,
                                 'NR_E_nucleus (W/m2)':Col_GI_11, 'NR_E_plasma (W/m2)':Col_GI_12})

dfM_GC = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (W/m2)':Col_GC_3, 'M_plasma (W/m2)':Col_GC_4,
                                 'E_nucleus (W/m2)':Col_GC_5, 'E_plasma (W/m2)':Col_GC_6,
                                 'NR_M_nucleus (W/m2)':Col_GC_9, 'NR_M_plasma (W/m2)':Col_GC_10,
                                 'NR_E_nucleus (W/m2)':Col_GC_11, 'NR_E_plasma (W/m2)':Col_GC_12})

dfM_GWPdyn = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (t-CO2eq)':Col_GWP_3, 'M_plasma (t-CO2eq)':Col_GWP_4,
                                     'E_nucleus (t-CO2eq)':Col_GWP_5, 'E_plasma (t-CO2eq)':Col_GWP_6,
                                     'NR_M_nucleus (t-CO2eq)':Col_GWP_9, 'NR_M_plasma (t-CO2eq)':Col_GWP_10,
                                     'NR_E_nucleus (t-CO2eq)':Col_GWP_11, 'NR_E_plasma (t-CO2eq)':Col_GWP_12})

#Export to excel
writer = pd.ExcelWriter('GraphResults_PF_PO_dim.xlsx', engine = 'xlsxwriter')

#GWI_inst
dfM_GI.to_excel(writer, sheet_name = 'GWI_Inst_PF_PO', header=True, index=False)

#GWI cumulative
dfM_GC.to_excel(writer, sheet_name = 'Cumulative GWI_PF_PO', header=True, index=False)

#GWP_dyn
dfM_GWPdyn.to_excel(writer, sheet_name = 'GWPdyn_PF_PO', header=True, index=False)

writer.save()
writer.close()
#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows
#print year column
# Time axis: one integer per simulated year, 0..200 inclusive (201 entries).
# (Idiom fix: list(range(...)) replaces the manual append loop.)
year = list(range(201))
print(year)
print(len(year))
# Unit-conversion divisors applied to every raw flow series below:
#   division     -- /1000 (kg -> t) and /(44/12) (CO2 -> C)
#   division_CH4 -- /1000 (kg -> t) and /(16/12) (CH4 -> C)
# NOTE(review): assumes the upstream series are expressed in kg of gas
# (kg-CO2 / kg-CH4) so that x/division yields t-C — confirm upstream units.
division = 1000*44/12
division_CH4 = 1000*16/12
# Convert every per-scenario flow series to t-C (CH4 series via division_CH4).
# NOTE(review): these conversions mutate lists/arrays in place and rebind module
# globals — re-running this cell (e.g. in an IDE cell-mode session) divides the
# same data a second time.
#M_nu
c_firewood_energy_S2nu = [x/division for x in c_firewood_energy_S2nu]
# decomp_emissions is shared by all scenarios; it is converted once here only.
decomp_emissions[:,0] = [x/division for x in decomp_emissions[:,0]]
TestDSM2nu.o = [x/division for x in TestDSM2nu.o]
PH_Emissions_PO_S2nu = [x/division for x in PH_Emissions_PO_S2nu]
PH_Emissions_HWP_S2nu = [x/division for x in PH_Emissions_HWP_S2nu]
#OC_storage_S2nu = [x/division for x in OC_storage_S2nu]
flat_list_nucleus = [x/division for x in flat_list_nucleus]
decomp_tot_CO2_S2nu[:,0] = [x/division for x in decomp_tot_CO2_S2nu[:,0]]
decomp_tot_CH4_S2nu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S2nu[:,0]]
#M_pl
c_firewood_energy_S2pl = [x/division for x in c_firewood_energy_S2pl]
TestDSM2pl.o = [x/division for x in TestDSM2pl.o]
PH_Emissions_PO_S2pl = [x/division for x in PH_Emissions_PO_S2pl]
PH_Emissions_HWP_S2pl = [x/division for x in PH_Emissions_HWP_S2pl]
#OC_storage_S2pl = [x/division for x in OC_storage_S2pl]
flat_list_plasma = [x/division for x in flat_list_plasma]
decomp_tot_CO2_S2pl[:,0] = [x/division for x in decomp_tot_CO2_S2pl[:,0]]
decomp_tot_CH4_S2pl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S2pl[:,0]]
#Enu
c_firewood_energy_Enu = [x/division for x in c_firewood_energy_Enu]
# The E scenarios additionally carry a wood-pellets flow.
c_pellets_Enu = [x/division for x in c_pellets_Enu]
TestDSM3nu.o = [x/division for x in TestDSM3nu.o]
PH_Emissions_PO_Enu = [x/division for x in PH_Emissions_PO_Enu]
PH_Emissions_HWP_Enu = [x/division for x in PH_Emissions_HWP_Enu]
#OC_storage_Enu = [x/division for x in OC_storage_Enu]
decomp_tot_CO2_Enu[:,0] = [x/division for x in decomp_tot_CO2_Enu[:,0]]
decomp_tot_CH4_Enu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Enu[:,0]]
#Epl
c_firewood_energy_Epl = [x/division for x in c_firewood_energy_Epl]
c_pellets_Epl = [x/division for x in c_pellets_Epl]
TestDSM3pl.o = [x/division for x in TestDSM3pl.o]
PH_Emissions_PO_Epl = [x/division for x in PH_Emissions_PO_Epl]
PH_Emissions_HWP_Epl = [x/division for x in PH_Emissions_HWP_Epl]
#OC_storage_Epl = [x/division for x in OC_storage_Epl]
decomp_tot_CO2_Epl[:,0] = [x/division for x in decomp_tot_CO2_Epl[:,0]]
decomp_tot_CH4_Epl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Epl[:,0]]
#landfill aggregate flows
# Landfill gas decomposition per scenario: element-wise CH4 + CO2 over the
# (tf,1) column vectors, flattened to a plain per-year list.
def _landfill_total(ch4_cols, co2_cols):
    # zip pairs the length-1 rows; a + b sums them; the inner loop unwraps
    # each 1-element row back to a scalar.
    return [total for a, b in zip(ch4_cols, co2_cols) for total in a + b]

Landfill_decomp_PF_PO_S2nu = _landfill_total(decomp_tot_CH4_S2nu, decomp_tot_CO2_S2nu)
Landfill_decomp_PF_PO_S2pl = _landfill_total(decomp_tot_CH4_S2pl, decomp_tot_CO2_S2pl)
Landfill_decomp_PF_PO_Enu = _landfill_total(decomp_tot_CH4_Enu, decomp_tot_CO2_Enu)
Landfill_decomp_PF_PO_Epl = _landfill_total(decomp_tot_CH4_Epl, decomp_tot_CO2_Epl)

#Wood processing aggregate flows
# Operational/processing emissions per scenario: PH_Emissions_PO plus
# PH_Emissions_HWP, element-wise per year.
def _pairwise_sum(first_series, second_series):
    return [a_val + b_val for a_val, b_val in zip(first_series, second_series)]

OpProcessing_PF_PO_S2nu = _pairwise_sum(PH_Emissions_PO_S2nu, PH_Emissions_HWP_S2nu)
OpProcessing_PF_PO_S2pl = _pairwise_sum(PH_Emissions_PO_S2pl, PH_Emissions_HWP_S2pl)
OpProcessing_PF_PO_Enu = _pairwise_sum(PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu)
OpProcessing_PF_PO_Epl = _pairwise_sum(PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl)
# Aliases feeding the per-scenario C-flow DataFrames below; gaps in the
# numbering correspond to columns that were dropped (see the commented-out
# OC_storage aliases).
#M_nu
Column1 = year
Column2 = c_firewood_energy_S2nu
Column3 = decomp_emissions[:,0]
Column4 = TestDSM2nu.o
Column5 = OpProcessing_PF_PO_S2nu
#Column7_1 = OC_storage_S2nu
Column7 = Landfill_decomp_PF_PO_S2nu
Column8 = flat_list_nucleus
#M_pl
Column9 = c_firewood_energy_S2pl
Column10 = TestDSM2pl.o
Column11 = OpProcessing_PF_PO_S2pl
#Column13_1 = OC_storage_S2pl
Column13 = Landfill_decomp_PF_PO_S2pl
Column14 = flat_list_plasma
#E_nu
Column15 = c_firewood_energy_Enu
Column15_1 = c_pellets_Enu
Column16 = TestDSM3nu.o
Column17 = OpProcessing_PF_PO_Enu
#Column19_1 = OC_storage_Enu
Column19 = Landfill_decomp_PF_PO_Enu
#E_pl
Column20 = c_firewood_energy_Epl
Column20_1 = c_pellets_Epl
Column21 = TestDSM3pl.o
Column22 = OpProcessing_PF_PO_Epl
#Column24_1 = OC_storage_Epl
Column24 = Landfill_decomp_PF_PO_Epl
# Per-scenario tables of the individual carbon flows (labels follow the
# F/St numbering of the system-definition figure), then export to Excel.
# The biomass sequestration column is the nucleus series for *_nu scenarios
# and the plasma series for *_pl scenarios; decomposition (Column3) is shared.
#M
dfM_nu = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column8,
                                  #'9: Landfill storage (t-C)':Column7_1,
                                  'F1-0: Residue decomposition (t-C)':Column3,
                                  'F6-0-1: Emissions from firewood/other energy use (t-C)':Column2,
                                  'F8-0: Operational stage/processing emissions (t-C)':Column5,
                                  'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column4,
                                  'F7-0: Landfill gas decomposition (t-C)':Column7})
dfM_pl = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column14,
                                  #'9: Landfill storage (t-C)':Column13_1,
                                  'F1-0: Residue decomposition (t-C)':Column3,
                                  'F6-0-1: Emissions from firewood/other energy use (t-C)':Column9,
                                  'F8-0: Operational stage/processing emissions (t-C)':Column11,
                                  'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
                                  'F7-0: Landfill gas decomposition (t-C)':Column13})
#E
# The E scenarios add the wood-pellets emission column (F4-0).
dfE_nu = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column8,
                                  #'9: Landfill storage (t-C)':Column19_1,
                                  'F1-0: Residue decomposition (t-C)':Column3,
                                  'F6-0-1: Emissions from firewood/other energy use (t-C)':Column15,
                                  'F8-0: Operational stage/processing emissions (t-C)':Column17,
                                  'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column16,
                                  'F4-0: Emissions from wood pellets use (t-C)':Column15_1,
                                  'F7-0: Landfill gas decomposition (t-C)':Column19})
dfE_pl = pd.DataFrame.from_dict({'Year':Column1, 'F0-1: Biomass C sequestration (t-C)':Column14,
                                  #'9: Landfill storage (t-C)':Column24_1,
                                  'F1-0: Residue decomposition (t-C)':Column3,
                                  'F6-0-1: Emissions from firewood/other energy use (t-C)':Column20,
                                  'F8-0: Operational stage/processing emissions (t-C)':Column22,
                                  'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column21,
                                  'F4-0: Emissions from wood pellets use (t-C)':Column20_1,
                                  'F7-0: Landfill gas decomposition (t-C)':Column24})
# One sheet per scenario in a single workbook.
writer = pd.ExcelWriter('C_flows_PF_PO_dim.xlsx', engine = 'xlsxwriter')
dfM_nu.to_excel(writer, sheet_name = 'PF_PO_M_nu', header=True, index=False)
dfM_pl.to_excel(writer, sheet_name = 'PF_PO_M_pl', header=True, index=False)
dfE_nu.to_excel(writer, sheet_name = 'PF_PO_E_nu', header=True, index=False)
dfE_pl.to_excel(writer, sheet_name = 'PF_PO_E_pl', header=True, index=False)
# NOTE(review): writer.save() is deprecated in pandas >= 1.5 — confirm version.
writer.save()
writer.close()
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#PF_PO_M_nu
# Symlog y-scale keeps both large and near-zero (and negative) flows visible.
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
# plot
ax1.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S2nu, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S2nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, OpProcessing_PF_PO_S2nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM2nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow', label='F7-0: Landfill gas decomposition')
# Legend placed outside the axes on the right.
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(-1,200)
ax1.set_yscale('symlog')
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows (t-C) (symlog)')
ax1.set_title('Carbon flow, PF_PO_M_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_M_nu
# Broken-axis figure: the same series drawn on two stacked panels with
# different y-limits, plus small diagonal marks to indicate the axis break.
f, (ax_a, ax_b) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_a.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_a.plot(t, OC_storage_S2nu, color='darkturquoise', label='9: Landfill storage')
ax_a.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_a.plot(t, c_firewood_energy_S2nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_a.plot(t, OpProcessing_PF_PO_S2nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_a.plot(t, TestDSM2nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_a.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_b.plot(t, c_firewood_energy_S2nu, color='mediumseagreen')
ax_b.plot(t, decomp_emissions[:,0], color='lightcoral')
#ax_b.plot(t, OC_storage_S2nu, color='darkturquoise')
ax_b.plot(t, TestDSM2nu.o, color='royalblue')
ax_b.plot(t, OpProcessing_PF_PO_S2nu, color='orange')
ax_b.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow')
ax_b.plot(t, flat_list_nucleus, color='darkkhaki')
#ax_a.set_yscale('log')
#ax_b.set_yscale('log')
# zoom-in / limit the view to different portions of the data
ax_a.set_xlim(-1,200)
ax_a.set_ylim(60, 75)
ax_b.set_ylim(-25, 35)
#ax_b.set_ylim(-0.3, 0.5)
# hide the spines between ax and ax2
ax_a.spines['bottom'].set_visible(False)
ax_b.spines['top'].set_visible(False)
ax_a.xaxis.tick_top()
ax_a.tick_params(labeltop=False)  # don't put tick labels at the top
ax_b.xaxis.tick_bottom()
ax_a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_a.transAxes, color='k', clip_on=False)
ax_a.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_a.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
kwargs.update(transform=ax_b.transAxes)  # switch to the bottom axes
ax_b.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
ax_b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
ax_b.set_xlabel('Time (year)')
ax_b.set_ylabel('C flows (t-C)')
ax_a.set_ylabel('C flows (t-C)')
ax_a.set_title('Carbon flow, PF_PO_M_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_M_pl
# Same layout as the M_nucleus symlog figure, for the plasma series.
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
# plot
ax2.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S2pl, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S2pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, OpProcessing_PF_PO_S2pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM2pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(-1,200)
ax2.set_yscale('symlog')
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows (t-C) (symlog)')
ax2.set_title('Carbon flow, PF_PO_M_plasma (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_M_pl
# Broken-axis figure for the M_plasma scenario (same construction as M_nucleus).
f, (ax_c, ax_d) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_c.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_c.plot(t, OC_storage_S2pl, color='darkturquoise', label='9: Landfill storage')
ax_c.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_c.plot(t, c_firewood_energy_S2pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_c.plot(t, OpProcessing_PF_PO_S2pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_c.plot(t, TestDSM2pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_c.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax_d.plot(t, c_firewood_energy_S2pl, color='mediumseagreen')
ax_d.plot(t, decomp_emissions[:,0], color='lightcoral')
#ax_d.plot(t, OC_storage_S2pl, color='darkturquoise')
ax_d.plot(t, TestDSM2pl.o, color='royalblue')
ax_d.plot(t, OpProcessing_PF_PO_S2pl, color='orange')
ax_d.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow')
ax_d.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_c.set_xlim(-1,200)
ax_c.set_ylim(60, 75)
ax_d.set_ylim(-25, 35)
# hide the spines between ax and ax2
ax_c.spines['bottom'].set_visible(False)
ax_d.spines['top'].set_visible(False)
ax_c.xaxis.tick_top()
ax_c.tick_params(labeltop=False)  # don't put tick labels at the top
ax_d.xaxis.tick_bottom()
ax_c.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_c.transAxes, color='k', clip_on=False)
ax_c.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_c.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
kwargs.update(transform=ax_d.transAxes)  # switch to the bottom axes
ax_d.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
ax_d.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
ax_d.set_xlabel('Time (year)')
ax_d.set_ylabel('C flows (t-C)')
ax_c.set_ylabel('C flows (t-C)')
ax_c.set_title('Carbon flow, PF_PO_M_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_E_nu
# E scenarios plot the wood-pellets flow (F4-0); the in-use stock outflow
# line is disabled here (commented out).
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
# plot
ax3.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax3.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax3.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(-1,200)
ax3.set_yscale('symlog')
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C) (symlog)')
ax3.set_title('Carbon flow, PF_PO_E_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_nu
# Broken-axis figure for E_nucleus; note the upper panel covers 170-190 t-C
# (the E scenarios peak higher than the M scenarios).
f, (ax_e, ax_f) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_e.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_e.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax_e.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_e.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_e.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_e.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_e.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_e.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax_f.plot(t, c_firewood_energy_Enu, color='mediumseagreen')
ax_f.plot(t, decomp_emissions[:,0], color='lightcoral')
ax_f.plot(t, c_pellets_Enu, color='slategrey')
#ax_f.plot(t, TestDSM3nu.o, color='royalblue')
#ax_f.plot(t, OC_storage_Enu, color='darkturquoise')
ax_f.plot(t, OpProcessing_PF_PO_Enu, color='orange')
ax_f.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow')
ax_f.plot(t, flat_list_nucleus, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_e.set_xlim(-1,200)
ax_e.set_ylim(170, 190)
ax_f.set_ylim(-25, 30)
# hide the spines between ax and ax2
ax_e.spines['bottom'].set_visible(False)
ax_f.spines['top'].set_visible(False)
ax_e.xaxis.tick_top()
ax_e.tick_params(labeltop=False)  # don't put tick labels at the top
ax_f.xaxis.tick_bottom()
ax_e.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_e.transAxes, color='k', clip_on=False)
ax_e.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_e.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
kwargs.update(transform=ax_f.transAxes)  # switch to the bottom axes
ax_f.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
ax_f.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
ax_f.set_xlabel('Time (year)')
ax_f.set_ylabel('C flows (t-C)')
ax_e.set_ylabel('C flows (t-C)')
ax_e.set_title('Carbon flow, PF_PO_E_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_E_pl
# Same layout as the E_nucleus symlog figure, for the plasma series.
fig=plt.figure()
fig.show()
ax4=fig.add_subplot(111)
# plot
ax4.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
#ax4.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax4.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
ax4.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4.set_xlim(-1,200)
ax4.set_yscale('symlog')
ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows (t-C) (symlog)')
ax4.set_title('Carbon flow, PF_PO_E_plasma (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_pl
# Broken-axis figure for E_plasma (same construction as E_nucleus).
f, (ax_g, ax_h) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_g.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_g.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax_g.plot(t, decomp_emissions[:,0], color='lightcoral', label='F1-0: Residue decomposition')
ax_g.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_g.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_g.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
#ax_g.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax_g.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
ax_h.plot(t, c_firewood_energy_Epl, color='mediumseagreen')
ax_h.plot(t, c_pellets_Epl, color='slategrey')
ax_h.plot(t, decomp_emissions[:,0], color='lightcoral')
#ax_h.plot(t, TestDSM3pl.o, color='royalblue')
ax_h.plot(t, OpProcessing_PF_PO_Epl, color='orange')
#ax_h.plot(t, OC_storage_Epl, color='darkturquoise')
ax_h.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow')
ax_h.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_g.set_xlim(-1,200)
ax_g.set_ylim(170, 190)
ax_h.set_ylim(-25, 30)
# hide the spines between ax and ax2
ax_g.spines['bottom'].set_visible(False)
ax_h.spines['top'].set_visible(False)
ax_g.xaxis.tick_top()
ax_g.tick_params(labeltop=False)  # don't put tick labels at the top
ax_h.xaxis.tick_bottom()
ax_g.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_g.transAxes, color='k', clip_on=False)
ax_g.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_g.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
kwargs.update(transform=ax_h.transAxes)  # switch to the bottom axes
ax_h.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
ax_h.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
ax_h.set_xlabel('Time (year)')
ax_h.set_ylabel('C flows (t-C)')
ax_g.set_ylabel('C flows (t-C)')
ax_g.set_title('Carbon flow, PF_PO_E_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#Step (23): Generate the excel file for the net carbon balance
# Net carbon balance per scenario: element-wise total of all per-year flow
# series (the E scenarios additionally include the wood-pellets flow).
def _net_balance(*flow_series):
    # Sum across the series year by year.
    return [sum(per_year) for per_year in zip(*flow_series)]

Agg_Cflow_PF_PO_S2nu = _net_balance(c_firewood_energy_S2nu, decomp_emissions[:,0], TestDSM2nu.o,
                                    OpProcessing_PF_PO_S2nu, Landfill_decomp_PF_PO_S2nu, flat_list_nucleus)
Agg_Cflow_PF_PO_S2pl = _net_balance(c_firewood_energy_S2pl, decomp_emissions[:,0], TestDSM2pl.o,
                                    OpProcessing_PF_PO_S2pl, Landfill_decomp_PF_PO_S2pl, flat_list_plasma)
Agg_Cflow_PF_PO_Enu = _net_balance(c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions[:,0], TestDSM3nu.o,
                                   OpProcessing_PF_PO_Enu, Landfill_decomp_PF_PO_Enu, flat_list_nucleus)
Agg_Cflow_PF_PO_Epl = _net_balance(c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions[:,0], TestDSM3pl.o,
                                   OpProcessing_PF_PO_Epl, Landfill_decomp_PF_PO_Epl, flat_list_plasma)
# Quick overview plot of the four net-balance curves (first 85 years only).
fig=plt.figure()
fig.show()
ax5=fig.add_subplot(111)
# plot
ax5.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange', label='M_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise', label='M_plasma')
ax5.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_plasma')
ax5.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax5.set_xlim(-0.3,85)
#ax5.set_yscale('symlog')
ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C)')
ax5.set_title('Net carbon balance, PF_PO')
plt.show()
#create column year
# Year index 0..200 (201 simulated years) for the net-balance table.
# (Idiom fix: list(range(...)) replaces the manual append loop.)
year = list(range(201))
print(year)
#Create column results
# Net-balance table: one column per scenario, exported as a single sheet.
dfM_PF_PO = pd.DataFrame.from_dict({'Year':year,'M_nucleus (t-C)':Agg_Cflow_PF_PO_S2nu, 'M_plasma (t-C)':Agg_Cflow_PF_PO_S2pl,
                                    'E_nucleus (t-C)':Agg_Cflow_PF_PO_Enu, 'E_plasma (t-C)':Agg_Cflow_PF_PO_Epl})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_PF_PO_dim.xlsx', engine = 'xlsxwriter')
dfM_PF_PO.to_excel(writer, sheet_name = 'PF_PO', header=True, index=False)
# NOTE(review): writer.save() is deprecated in pandas >= 1.5 — confirm version.
writer.save()
writer.close()
#%%
#Step (24): Plot the net carbon balance
##Net carbon balance for M and E (axis break)
# Broken-axis version of the net-balance plot, with a dashed zero line.
f, (ax5a, ax5b) = plt.subplots(2, 1, sharex=True)
ax5a.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange', label='M_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise', label='M_plasma')
ax5a.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_plasma')
# NOTE(review): the zero line uses zerolistmaker(tf-1) (length tf-1) against t —
# confirm len(t) matches, otherwise matplotlib raises a dimension error.
ax5a.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax5b.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange')
ax5b.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise')
ax5b.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral')
ax5b.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen')
ax5b.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
# zoom-in / limit the view to different portions of the data
ax5a.set_xlim(-0.35,85)
#ax5a.set_xlim(-1,200)
ax5a.set_ylim(210, 230)
ax5b.set_xlim(-0.35,85)
#ax5b.set_xlim(-1,200)
ax5b.set_ylim(-5, 50)
# hide the spines between ax and ax2
ax5a.spines['bottom'].set_visible(False)
ax5b.spines['top'].set_visible(False)
ax5a.xaxis.tick_top()
ax5a.tick_params(labeltop=False)  # don't put tick labels at the top
ax5b.xaxis.tick_bottom()
ax5a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax5a.transAxes, color='k', clip_on=False)
ax5a.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax5a.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
kwargs.update(transform=ax5b.transAxes)  # switch to the bottom axes
ax5b.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
ax5b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
ax5b.set_xlabel('Time (year)')
ax5b.set_ylabel('C flows (t-C)')
ax5a.set_ylabel('C flows (t-C)')
ax5a.set_title('Net carbon balance, PF_PO')
plt.show()
#%%
#Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)
#print year column
# Year index 0..200 (201 simulated years), reused as the 'Year' column below.
# (Idiom fix: list(range(...)) replaces the manual append loop.)
year = list(range(201))
print(year)
# Re-read the per-scenario input sheets needed for the system-definition
# bookkeeping (one sheet per scenario).
# NOTE(review): absolute Windows paths make this machine-specific — consider
# a configurable base directory.
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
Column1 = year
# Same unit divisors as Step (21): kg of gas -> t-C.
division = 1000*44/12
division_CH4 = 1000*16/12
## S2nu
## define the input flow for the landfill (F5-7)
# Other_C_storage is read in the sheet's native units, converted to t-C, and
# made positive (the sheet stores it as a negative/storage value).
OC_storage_S2nu = df2nu['Other_C_storage'].values
OC_storage_S2nu = [x/division for x in OC_storage_S2nu]
OC_storage_S2nu = [abs(number) for number in OC_storage_S2nu]
# Landfill input inflates storage by 1/0.82 — 0.82 is presumably the fraction
# of landfilled C that remains stored; TODO confirm against the model docs.
C_LF_S2nu = [x*1/0.82 for x in OC_storage_S2nu]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S2nu = [x/division for x in df2nu['Input_PF'].values]
# The 1/3 and (below) 2/3 splits of the firewood flow are hard-coded
# allocation fractions — TODO confirm their source.
HWP_S2nu_energy = [x*1/3 for x in c_firewood_energy_S2nu]
HWP_S2nu_landfill = [x*1/0.82 for x in OC_storage_S2nu]
HWP_S2nu_sum = [HWP_S2nu, HWP_S2nu_energy, HWP_S2nu_landfill]
HWP_S2nu_sum = [sum(x) for x in zip(*HWP_S2nu_sum)]
#in-use stocks (S-4)
TestDSM2nu.s = [x/division for x in TestDSM2nu.s]
#TestDSM2nu.i = [x/division for x in TestDSM2nu.i]
# calculate C stocks in landfill (S-7)
# Running balance: stock[i+1] = stock[i] + input (C_LF) - decomposition.
tf = 201
# Despite the name, this is a shape tuple for np.zeros, not a matrix.
zero_matrix_stocks_S2nu = (tf,1)
stocks_S2nu = np.zeros(zero_matrix_stocks_S2nu)
i = 0
stocks_S2nu[0] = C_LF_S2nu[0] - Landfill_decomp_PF_PO_S2nu[0]
while i < tf-1:
    stocks_S2nu[i+1] = np.array(C_LF_S2nu[i+1] - Landfill_decomp_PF_PO_S2nu[i+1] + stocks_S2nu[i])
    i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S2nu = [x1+x2 for (x1,x2) in zip(HWP_S2nu_sum, [x*2/3 for x in c_firewood_energy_S2nu])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
# Running balance starting from the initial above-ground biomass (initAGB),
# drawn down by sequestration, residue decomposition and logging outflows.
tf = 201
zero_matrix_ForCstocks_S2nu = (tf,1)
ForCstocks_S2nu = np.zeros(zero_matrix_ForCstocks_S2nu)
i = 0
ForCstocks_S2nu[0] = initAGB - flat_list_nucleus[0] - decomp_emissions[0] - HWP_logged_S2nu[0]
while i < tf-1:
    ForCstocks_S2nu[i+1] = np.array(ForCstocks_S2nu[i] - flat_list_nucleus[i+1] - decomp_emissions[i+1] - HWP_logged_S2nu[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
df2nu_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO_dim.xlsx', 'PF_PO_S2nu')
NonRW_amount_S2nu = df2nu_amount['NonRW_amount'].values
NonRW_amount_S2nu = [x/1000 for x in NonRW_amount_S2nu]  # kg -> t
##NonRW emissions (F9-0-2)
emission_NonRW_S2nu = [x/division for x in emission_NonRW_S2nu]
#create columns
# NOTE: this rebinds dfM_nu (previously the Step-21 flows table, already
# written to C_flows_PF_PO_dim.xlsx) to the full system-definition table.
dfM_nu = pd.DataFrame.from_dict({'Year':Column1,
                                    'F0-1 (t-C)': flat_list_nucleus,
                                    'F1-0 (t-C)': decomp_emissions[:,0],
                                    #'F1a-2 (t-C)': PF_PO_S2nu,
                                    #'F1c-2 (t-C)': FP_PO_S2nu,
                                    'F1-2 (t-C)': HWP_logged_S2nu,
                                    'St-1 (t-C)':ForCstocks_S2nu[:,0],
                                    'F2-3 (t-C)': HWP_S2nu_sum,
                                    'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S2nu],
                                    'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S2nu_sum, [x*1/0.82 for x in OC_storage_S2nu], [x*1/3 for x in c_firewood_energy_S2nu])],
                                    'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S2nu],
                                    'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S2nu],
                                    # 'F4-0 (t-C)':,
                                    'St-4 (t-C)': TestDSM2nu.s,
                                    #'S-4-i (t-C)': TestDSM2nu.i,
                                    'F4-5 (t-C)': TestDSM2nu.o,
                                    'F5-6 (t-C)': TestDSM2nu.o,
                                    'F5-7 (t-C)': C_LF_S2nu,
                                    'F6-0-1 (t-C)': c_firewood_energy_S2nu,
                                    'F6-0-2 (t-C)': TestDSM2nu.o,
                                    'St-7 (t-C)': stocks_S2nu[:,0],
                                    'F7-0 (t-C)': Landfill_decomp_PF_PO_S2nu,
                                    'F8-0 (t-C)': OpProcessing_PF_PO_S2nu,
                                    'S9-0 (t)': NonRW_amount_S2nu,
                                    'F9-0 (t-C)': emission_NonRW_S2nu,
                                    })
##S2pl
# Same bookkeeping as the S2nu section above, for the M_plasma scenario.
## define the input flow for the landfill (F5-7)
OC_storage_S2pl = df2pl['Other_C_storage'].values
OC_storage_S2pl = [x/division for x in OC_storage_S2pl]
OC_storage_S2pl = [abs(number) for number in OC_storage_S2pl]
# 0.82 is presumably the stored fraction of landfilled C — TODO confirm.
C_LF_S2pl = [x*1/0.82 for x in OC_storage_S2pl]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S2pl = [x/division for x in df2pl['Input_PF'].values]
HWP_S2pl_energy = [x*1/3 for x in c_firewood_energy_S2pl]
HWP_S2pl_landfill = [x*1/0.82 for x in OC_storage_S2pl]
HWP_S2pl_sum = [HWP_S2pl, HWP_S2pl_energy, HWP_S2pl_landfill]
HWP_S2pl_sum = [sum(x) for x in zip(*HWP_S2pl_sum)]
#in-use stocks (S-4)
TestDSM2pl.s = [x/division for x in TestDSM2pl.s]
#TestDSM2pl.i = [x/division for x in TestDSM2pl.i]
# calculate C stocks in landfill (S-7)
# Running balance: stock[i+1] = stock[i] + input (C_LF) - decomposition.
tf = 201
zero_matrix_stocks_S2pl = (tf,1)  # shape tuple for np.zeros
stocks_S2pl = np.zeros(zero_matrix_stocks_S2pl)
i = 0
stocks_S2pl[0] = C_LF_S2pl[0] - Landfill_decomp_PF_PO_S2pl[0]
while i < tf-1:
    stocks_S2pl[i+1] = np.array(C_LF_S2pl[i+1] - Landfill_decomp_PF_PO_S2pl[i+1] + stocks_S2pl[i])
    i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S2pl = [x1+x2 for (x1,x2) in zip(HWP_S2pl_sum, [x*2/3 for x in c_firewood_energy_S2pl])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S2pl = (tf,1)
ForCstocks_S2pl = np.zeros(zero_matrix_ForCstocks_S2pl)
i = 0
ForCstocks_S2pl[0] = initAGB - flat_list_plasma[0] - decomp_emissions[0] - HWP_logged_S2pl[0]
while i < tf-1:
    ForCstocks_S2pl[i+1] = np.array(ForCstocks_S2pl[i] - flat_list_plasma[i+1] - decomp_emissions[i+1] - HWP_logged_S2pl[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
df2pl_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO_dim.xlsx', 'PF_PO_S2pl')
NonRW_amount_S2pl = df2pl_amount['NonRW_amount'].values
NonRW_amount_S2pl = [x/1000 for x in NonRW_amount_S2pl]  # kg -> t
##NonRW emissions (F9-0-2)
emission_NonRW_S2pl = [x/division for x in emission_NonRW_S2pl]
#create columns
# NOTE: rebinds dfM_pl (previously the Step-21 flows table, already exported).
dfM_pl = pd.DataFrame.from_dict({'Year':Column1,
                                    'F0-1 (t-C)': flat_list_plasma,
                                    'F1-0 (t-C)': decomp_emissions[:,0],
                                    #'F1a-2 (t-C)': PF_PO_S2pl,
                                    #'F1c-2 (t-C)': FP_PO_S2pl,
                                    'F1-2 (t-C)': HWP_logged_S2pl,
                                    'St-1 (t-C)':ForCstocks_S2pl[:,0],
                                    'F2-3 (t-C)': HWP_S2pl_sum,
                                    'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S2pl],
                                    'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S2pl_sum, [x*1/0.82 for x in OC_storage_S2pl], [x*1/3 for x in c_firewood_energy_S2pl])],
                                    'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S2pl],
                                    'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S2pl],
                                    # 'F4-0 (t-C)':,
                                    'St-4 (t-C)': TestDSM2pl.s,
                                    #'S-4-i (t-C)': TestDSM2pl.i,
                                    'F4-5 (t-C)': TestDSM2pl.o,
                                    'F5-6 (t-C)': TestDSM2pl.o,
                                    'F5-7 (t-C)': C_LF_S2pl,
                                    'F6-0-1 (t-C)': c_firewood_energy_S2pl,
                                    'F6-0-2 (t-C)': TestDSM2pl.o,
                                    'St-7 (t-C)': stocks_S2pl[:,0],
                                    'F7-0 (t-C)': Landfill_decomp_PF_PO_S2pl,
                                    'F8-0 (t-C)': OpProcessing_PF_PO_S2pl,
                                    'S9-0 (t)': NonRW_amount_S2pl,
                                    'F9-0 (t-C)': emission_NonRW_S2pl,
                                    })
##Enu
## ---- Enu scenario: same pipeline as S2pl above, for the nucleus/pellets case ----
## Reads dfEnu / c_firewood_energy_Enu / TestDSM3nu / flat_list_nucleus and
## related globals; reassigns several names in place -- run once, in order.

## define the input flow for the landfill (F5-7)
OC_storage_Enu = dfEnu['Other_C_storage'].values
OC_storage_Enu = [x/division for x in OC_storage_Enu]
OC_storage_Enu = [abs(number) for number in OC_storage_Enu]    # storage reported negative; keep magnitudes
C_LF_Enu = [x*1/0.82 for x in OC_storage_Enu]                  # 0.82 = stored fraction assumption -- confirm

## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_Enu = [x/division for x in dfEnu['Wood_pellets'].values]
HWP_Enu_energy = [x*1/3 for x in c_firewood_energy_Enu]
HWP_Enu_landfill = [x*1/0.82 for x in OC_storage_Enu]
HWP_Enu_sum = [HWP_Enu, HWP_Enu_energy, HWP_Enu_landfill]
HWP_Enu_sum = [sum(x) for x in zip(*HWP_Enu_sum)]              # element-wise total

#in-use stocks (S-4)
TestDSM3nu.s = [x/division for x in TestDSM3nu.s]              # NOTE: overwrites the DSM attribute in place
#TestDSM3nu.i = [x/division for x in TestDSM3nu.i]

# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_Enu = (tf,1)
stocks_Enu = np.zeros(zero_matrix_stocks_Enu)
i = 0
stocks_Enu[0] = C_LF_Enu[0] - Landfill_decomp_PF_PO_Enu[0]
while i < tf-1:
    stocks_Enu[i+1] = np.array(C_LF_Enu[i+1] - Landfill_decomp_PF_PO_Enu[i+1] + stocks_Enu[i])
    i = i + 1

## calculate aggregate flow of logged wood (F1-2)
HWP_logged_Enu = [x1+x2 for (x1,x2) in zip(HWP_Enu_sum, [x*2/3 for x in c_firewood_energy_Enu])]

## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_Enu = (tf,1)
ForCstocks_Enu = np.zeros(zero_matrix_ForCstocks_Enu)
i = 0
ForCstocks_Enu[0] = initAGB - flat_list_nucleus[0] - decomp_emissions[0] - HWP_logged_Enu[0]
while i < tf-1:
    ForCstocks_Enu[i+1] = np.array(ForCstocks_Enu[i] - flat_list_nucleus[i+1] - decomp_emissions[i+1] - HWP_logged_Enu[i+1])
    i = i + 1

##NonRW materials/energy amount (F9-0-1)
# NOTE(review): hard-coded absolute Windows path -- consider making this configurable
dfEnu_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO_dim.xlsx', 'PF_PO_Enu')
NonRW_amount_Enu = dfEnu_amount['NonRW_amount'].values
NonRW_amount_Enu = [x/1000 for x in NonRW_amount_Enu]          # presumably kg -> t; verify source units

##NonRW emissions (F9-0-2)
emission_NonRW_Enu = [x/division for x in emission_NonRW_Enu]  # reassigned in place: do not rerun this line

#create columns: flows (F*) and stocks (St-*) table for the Enu scenario
dfE_nu = pd.DataFrame.from_dict({'Year':Column1,
    'F0-1 (t-C)': flat_list_nucleus,
    'F1-0 (t-C)': decomp_emissions[:,0],
    #'F1a-2 (t-C)': PF_PO_Enu,
    #'F1c-2 (t-C)': FP_PO_Enu,
    'F1-2 (t-C)': HWP_logged_Enu,
    'St-1 (t-C)':ForCstocks_Enu[:,0],
    'F2-3 (t-C)': HWP_Enu_sum,
    'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_Enu],
    'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_Enu_sum, [x*1/0.82 for x in OC_storage_Enu], [x*1/3 for x in c_firewood_energy_Enu])],
    'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_Enu],
    'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_Enu],
    'F4-0 (t-C)':c_pellets_Enu,
    'St-4 (t-C)': TestDSM3nu.s,
    #'S-4-i (t-C)': TestDSM3nu.i,
    'F4-5 (t-C)': TestDSM3nu.o,
    'F5-6 (t-C)': TestDSM3nu.o,
    'F5-7 (t-C)': C_LF_Enu,
    'F6-0-1 (t-C)': c_firewood_energy_Enu,
    'F6-0-2 (t-C)': TestDSM3nu.o,
    'St-7 (t-C)': stocks_Enu[:,0],
    'F7-0 (t-C)': Landfill_decomp_PF_PO_Enu,
    'F8-0 (t-C)': OpProcessing_PF_PO_Enu,
    'S9-0 (t)': NonRW_amount_Enu,
    'F9-0 (t-C)': emission_NonRW_Enu,
    })
##Epl
## ---- Epl scenario: same pipeline as S2pl/Enu above, for the plasma/pellets case ----
## Reads dfEpl / c_firewood_energy_Epl / TestDSM3pl / flat_list_plasma and
## related globals; reassigns several names in place -- run once, in order.

## define the input flow for the landfill (F5-7)
OC_storage_Epl = dfEpl['Other_C_storage'].values
OC_storage_Epl = [x/division for x in OC_storage_Epl]
OC_storage_Epl = [abs(number) for number in OC_storage_Epl]    # storage reported negative; keep magnitudes
C_LF_Epl = [x*1/0.82 for x in OC_storage_Epl]                  # 0.82 = stored fraction assumption -- confirm

## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_Epl = [x/division for x in dfEpl['Wood_pellets'].values]
HWP_Epl_energy = [x*1/3 for x in c_firewood_energy_Epl]
HWP_Epl_landfill = [x*1/0.82 for x in OC_storage_Epl]
HWP_Epl_sum = [HWP_Epl, HWP_Epl_energy, HWP_Epl_landfill]
HWP_Epl_sum = [sum(x) for x in zip(*HWP_Epl_sum)]              # element-wise total

#in-use stocks (S-4)
TestDSM3pl.s = [x/division for x in TestDSM3pl.s]              # NOTE: overwrites the DSM attribute in place
#TestDSM3pl.i = [x/division for x in TestDSM3pl.i]

# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_Epl = (tf,1)
stocks_Epl = np.zeros(zero_matrix_stocks_Epl)
i = 0
stocks_Epl[0] = C_LF_Epl[0] - Landfill_decomp_PF_PO_Epl[0]
while i < tf-1:
    stocks_Epl[i+1] = np.array(C_LF_Epl[i+1] - Landfill_decomp_PF_PO_Epl[i+1] + stocks_Epl[i])
    i = i + 1

## calculate aggregate flow of logged wood (F1-2)
HWP_logged_Epl = [x1+x2 for (x1,x2) in zip(HWP_Epl_sum, [x*2/3 for x in c_firewood_energy_Epl])]

## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_Epl = (tf,1)
ForCstocks_Epl = np.zeros(zero_matrix_ForCstocks_Epl)
i = 0
ForCstocks_Epl[0] = initAGB - flat_list_plasma[0] - decomp_emissions[0] - HWP_logged_Epl[0]
while i < tf-1:
    ForCstocks_Epl[i+1] = np.array(ForCstocks_Epl[i] - flat_list_plasma[i+1] - decomp_emissions[i+1] - HWP_logged_Epl[i+1])
    i = i + 1

##NonRW materials/energy amount (F9-0-1)
# NOTE(review): hard-coded absolute Windows path -- consider making this configurable
dfEpl_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO_dim.xlsx', 'PF_PO_Epl')
NonRW_amount_Epl = dfEpl_amount['NonRW_amount'].values
NonRW_amount_Epl = [x/1000 for x in NonRW_amount_Epl]          # presumably kg -> t; verify source units

##NonRW emissions (F9-0-2)
emission_NonRW_Epl = [x/division for x in emission_NonRW_Epl]  # reassigned in place: do not rerun this line

#create columns: flows (F*) and stocks (St-*) table for the Epl scenario
dfE_pl = pd.DataFrame.from_dict({'Year':Column1,
    'F0-1 (t-C)': flat_list_plasma,
    'F1-0 (t-C)': decomp_emissions[:,0],
    #'F1a-2 (t-C)': PF_PO_Epl,
    #'F1c-2 (t-C)': FP_PO_Epl,
    'F1-2 (t-C)': HWP_logged_Epl,
    'St-1 (t-C)':ForCstocks_Epl[:,0],
    'F2-3 (t-C)': HWP_Epl_sum,
    'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_Epl],
    'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_Epl_sum, [x*1/0.82 for x in OC_storage_Epl], [x*1/3 for x in c_firewood_energy_Epl])],
    'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_Epl],
    'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_Epl],
    'F4-0 (t-C)': c_pellets_Epl,
    'St-4 (t-C)': TestDSM3pl.s,
    #'S-4-i (t-C)': TestDSM3pl.i,
    'F4-5 (t-C)': TestDSM3pl.o,
    'F5-6 (t-C)': TestDSM3pl.o,
    'F5-7 (t-C)': C_LF_Epl,
    'F6-0-1 (t-C)': c_firewood_energy_Epl,
    'F6-0-2 (t-C)': TestDSM3pl.o,
    'St-7 (t-C)': stocks_Epl[:,0],
    'F7-0 (t-C)': Landfill_decomp_PF_PO_Epl,
    'F8-0 (t-C)': OpProcessing_PF_PO_Epl,
    'S9-0 (t)': NonRW_amount_Epl,
    'F9-0 (t-C)': emission_NonRW_Epl,
    })
# Persist the four scenario tables to one workbook, one sheet per scenario.
# Fix: the original called writer.save() and then writer.close(). close()
# already saves, so the workbook was written twice on older pandas, and
# ExcelWriter.save() was deprecated in pandas 1.5 and removed in 2.0.
# The context manager saves and closes exactly once.
with pd.ExcelWriter('C_flows_SysDef_PF_PO_dim.xlsx', engine='xlsxwriter') as writer:
    dfM_nu.to_excel(writer, sheet_name='PF_PO_Mnu', header=True, index=False)
    dfM_pl.to_excel(writer, sheet_name='PF_PO_Mpl', header=True, index=False)
    dfE_nu.to_excel(writer, sheet_name='PF_PO_E2nu', header=True, index=False)
    dfE_pl.to_excel(writer, sheet_name='PF_PO_E2pl', header=True, index=False)
#%%
| [
"noreply@github.com"
] | rioarya.noreply@github.com |
8810e80afe9d5667581d1c646a07dad52c3242c2 | 131ccf66fb787e9b1f0773a25fa518d1f2a3c5d0 | /gui_programming/guimaker.py | f88dcbdb765fc650380d10a48a44bdb26e259768 | [] | no_license | jocogum10/learning-python-programming | a0ba62abde49fd79762bcb7ba4a94bf8126afa77 | 035858bd332e3970d95db8bce7b1175e450802db | refs/heads/master | 2020-07-07T17:08:00.743196 | 2019-12-13T05:32:47 | 2019-12-13T05:32:47 | 203,416,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,637 | py | """
################################################################################
An extended Frame that makes window menus and toolbars automatically.
Use GuiMakerMenu for embedded components (makes frame-based menus).
Use GuiMakerWindowMenu for top-level windows (makes Tk8.0 window menus).
See the self-test code (and PyEdit) for an example layout tree format.
################################################################################
"""
import sys
from tkinter import * # widget classes
from tkinter.messagebox import showinfo
class GuiMaker(Frame):
    """
    An extended Frame that builds a window's menu bar and toolbar
    automatically from declarative layout trees supplied by a subclass.
    Subclasses override start() to fill in menuBar/toolBar, and
    makeWidgets() to supply the middle content.
    """
    menuBar    = []        # layout tree: [(label, underlineIndex, itemList), ...]
    toolBar    = []        # buttons: [(label, callback, packOptions), ...]
    helpButton = True      # add a Help button at the right edge of the menu bar?

    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack(expand=YES, fill=BOTH)   # let the container stretch with the window
        self.start()                       # subclass hook: define menuBar/toolBar
        self.makeMenuBar()                 # build the menu bar here
        self.makeToolBar()                 # build the toolbar here
        self.makeWidgets()                 # subclass hook: add the middle part

    def makeMenuBar(self):
        """
        Build a frame-based menu bar along the top edge
        (expand=no, fill=x keeps its width on resize).
        """
        bar = Frame(self, relief=RAISED, bd=2)
        bar.pack(side=TOP, fill=X)
        for (label, mnemonic, items) in self.menuBar:
            button = Menubutton(bar, text=label, underline=mnemonic)
            button.pack(side=LEFT)
            submenu = Menu(button)
            self.addMenuItems(submenu, items)
            button.config(menu=submenu)
        if self.helpButton:
            Button(bar, text='Help',
                   cursor='gumby',
                   relief=FLAT,
                   command=self.help).pack(side=RIGHT)

    def addMenuItems(self, menu, items):
        """
        Recursively populate one pulldown menu from a nested item list:
        'separator' strings, lists of entry indexes to disable,
        (label, underline, callable) command tuples, and
        (label, underline, sublist) cascades.
        """
        for entry in items:
            if entry == 'separator':
                menu.add_separator({})               # horizontal divider
            elif type(entry) == list:                # indexes of entries to gray out
                for position in entry:
                    menu.entryconfig(position, state=DISABLED)
            elif type(entry[2]) != list:             # leaf: a command entry
                menu.add_command(label=entry[0],
                                 underline=entry[1],
                                 command=entry[2])
            else:                                    # nested list: a submenu
                cascade = Menu(menu)
                self.addMenuItems(cascade, entry[2])
                menu.add_cascade(label=entry[0],
                                 underline=entry[1],
                                 menu=cascade)

    def makeToolBar(self):
        """
        Build a button bar along the bottom edge, if a toolBar tree was given
        (expand=no, fill=x keeps its width on resize). Could support images
        too -- would need prebuilt gifs or PIL thumbnails.
        """
        if self.toolBar:
            bar = Frame(self, cursor='hand2', relief=SUNKEN, bd=2)
            bar.pack(side=BOTTOM, fill=X)
            for (label, action, where) in self.toolBar:
                Button(bar, text=label, command=action).pack(where)

    def makeWidgets(self):
        """
        Default middle content: a placeholder label showing the class name.
        Built last so the menu/toolbar stay on top/bottom and are clipped
        last on shrink; subclasses override and may pack on any side.
        """
        placeholder = Label(self,
                            width=40, height=10,
                            relief=SUNKEN, bg='white',
                            text=self.__class__.__name__,
                            cursor='crosshair')
        placeholder.pack(expand=YES, fill=BOTH, side=TOP)

    def help(self):
        """Default Help handler; subclasses normally override this."""
        showinfo('Help', 'Sorry, no help for ' + self.__class__.__name__)

    def start(self):
        """Subclass hook: assign self.menuBar / self.toolBar here."""
        pass
################################################################################
# Customize for Tk 8.0 main window menu bar, instead of a frame
################################################################################

GuiMakerFrameMenu = GuiMaker           # alias: frame-based menus, for embedded components
class GuiMakerWindowMenu(GuiMaker):    # use this for top-level window menus
    """
    GuiMaker variant that installs a real Tk 8.0 window menu on the
    enclosing top-level window instead of building a frame-based bar.
    """
    def makeMenuBar(self):
        topmenu = Menu(self.master)
        self.master.config(menu=topmenu)          # attach to the enclosing window
        for (label, mnemonic, items) in self.menuBar:
            submenu = Menu(topmenu)
            self.addMenuItems(submenu, items)
            topmenu.add_cascade(label=label, underline=mnemonic, menu=submenu)
        if self.helpButton:
            if sys.platform[:3] == 'win':
                # Windows convention: a flat Help entry on the menu bar
                topmenu.add_command(label='Help', command=self.help)
            else:
                helpmenu = Menu(topmenu)          # Linux needs real pull down
                helpmenu.add_command(label='About', command=self.help)
                topmenu.add_cascade(label='Help', menu=helpmenu)
################################################################################
# Self-test when file run standalone: 'python guimaker.py'
################################################################################

if __name__ == '__main__':
    from guimixin import GuiMixin                 # project-local mixin: supplies a help() dialog

    # one shared layout tree, reused by all three demo subclasses below
    menuBar = [
        ('File', 0,
            [('Open', 0, lambda:0),               # placeholder callbacks
             ('Quit', 0, sys.exit)]),
        ('Edit', 0,
            [('Cut', 0, lambda:0),
             ('Paste', 0, lambda:0)]) ]
    toolBar = [('Quit', sys.exit, {'side': LEFT})]

    class TestAppFrameMenu(GuiMixin, GuiMakerFrameMenu):      # frame-based menus, mixin help
        def start(self):
            self.menuBar = menuBar
            self.toolBar = toolBar
    class TestAppWindowMenu(GuiMixin, GuiMakerWindowMenu):    # window menus, mixin help
        def start(self):
            self.menuBar = menuBar
            self.toolBar = toolBar
    class TestAppWindowMenuBasic(GuiMakerWindowMenu):
        def start(self):
            self.menuBar = menuBar
            self.toolBar = toolBar                # guimaker help, not guimixin

    root = Tk()
    TestAppFrameMenu(Toplevel())                  # three windows side by side
    TestAppWindowMenu(Toplevel())
    TestAppWindowMenuBasic(root)
    root.mainloop()
"jocogum10@gmail.com"
] | jocogum10@gmail.com |
347f8b54dfb2cd1482e50fb225597255d806a74b | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-elb/huaweicloudsdkelb/v3/model/list_load_balancers_request.py | 65c6fc83e8a69f5da83a58da3b5a9f60ed29c66c | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,415 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListLoadBalancersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'admin_state_up': 'bool',
'availability_zone_list': 'list[str]',
'billing_info': 'list[str]',
'deletion_protection_enable': 'bool',
'description': 'list[str]',
'eips': 'list[str]',
'enterprise_project_id': 'list[str]',
'guaranteed': 'bool',
'id': 'list[str]',
'ip_version': 'list[int]',
'ipv6_vip_address': 'list[str]',
'ipv6_vip_port_id': 'list[str]',
'ipv6_vip_virsubnet_id': 'list[str]',
'l4_flavor_id': 'list[str]',
'l4_scale_flavor_id': 'list[str]',
'l7_flavor_id': 'list[str]',
'l7_scale_flavor_id': 'list[str]',
'limit': 'int',
'marker': 'str',
'member_address': 'list[str]',
'member_device_id': 'list[str]',
'name': 'list[str]',
'operating_status': 'list[str]',
'page_reverse': 'bool',
'provisioning_status': 'list[str]',
'publicips': 'list[str]',
'vip_address': 'list[str]',
'vip_port_id': 'list[str]',
'vip_subnet_cidr_id': 'list[str]',
'vpc_id': 'list[str]'
}
attribute_map = {
'admin_state_up': 'admin_state_up',
'availability_zone_list': 'availability_zone_list',
'billing_info': 'billing_info',
'deletion_protection_enable': 'deletion_protection_enable',
'description': 'description',
'eips': 'eips',
'enterprise_project_id': 'enterprise_project_id',
'guaranteed': 'guaranteed',
'id': 'id',
'ip_version': 'ip_version',
'ipv6_vip_address': 'ipv6_vip_address',
'ipv6_vip_port_id': 'ipv6_vip_port_id',
'ipv6_vip_virsubnet_id': 'ipv6_vip_virsubnet_id',
'l4_flavor_id': 'l4_flavor_id',
'l4_scale_flavor_id': 'l4_scale_flavor_id',
'l7_flavor_id': 'l7_flavor_id',
'l7_scale_flavor_id': 'l7_scale_flavor_id',
'limit': 'limit',
'marker': 'marker',
'member_address': 'member_address',
'member_device_id': 'member_device_id',
'name': 'name',
'operating_status': 'operating_status',
'page_reverse': 'page_reverse',
'provisioning_status': 'provisioning_status',
'publicips': 'publicips',
'vip_address': 'vip_address',
'vip_port_id': 'vip_port_id',
'vip_subnet_cidr_id': 'vip_subnet_cidr_id',
'vpc_id': 'vpc_id'
}
def __init__(self, admin_state_up=None, availability_zone_list=None, billing_info=None, deletion_protection_enable=None, description=None, eips=None, enterprise_project_id=None, guaranteed=None, id=None, ip_version=None, ipv6_vip_address=None, ipv6_vip_port_id=None, ipv6_vip_virsubnet_id=None, l4_flavor_id=None, l4_scale_flavor_id=None, l7_flavor_id=None, l7_scale_flavor_id=None, limit=None, marker=None, member_address=None, member_device_id=None, name=None, operating_status=None, page_reverse=None, provisioning_status=None, publicips=None, vip_address=None, vip_port_id=None, vip_subnet_cidr_id=None, vpc_id=None):
"""ListLoadBalancersRequest - a model defined in huaweicloud sdk"""
self._admin_state_up = None
self._availability_zone_list = None
self._billing_info = None
self._deletion_protection_enable = None
self._description = None
self._eips = None
self._enterprise_project_id = None
self._guaranteed = None
self._id = None
self._ip_version = None
self._ipv6_vip_address = None
self._ipv6_vip_port_id = None
self._ipv6_vip_virsubnet_id = None
self._l4_flavor_id = None
self._l4_scale_flavor_id = None
self._l7_flavor_id = None
self._l7_scale_flavor_id = None
self._limit = None
self._marker = None
self._member_address = None
self._member_device_id = None
self._name = None
self._operating_status = None
self._page_reverse = None
self._provisioning_status = None
self._publicips = None
self._vip_address = None
self._vip_port_id = None
self._vip_subnet_cidr_id = None
self._vpc_id = None
self.discriminator = None
if admin_state_up is not None:
self.admin_state_up = admin_state_up
if availability_zone_list is not None:
self.availability_zone_list = availability_zone_list
if billing_info is not None:
self.billing_info = billing_info
if deletion_protection_enable is not None:
self.deletion_protection_enable = deletion_protection_enable
if description is not None:
self.description = description
if eips is not None:
self.eips = eips
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if guaranteed is not None:
self.guaranteed = guaranteed
if id is not None:
self.id = id
if ip_version is not None:
self.ip_version = ip_version
if ipv6_vip_address is not None:
self.ipv6_vip_address = ipv6_vip_address
if ipv6_vip_port_id is not None:
self.ipv6_vip_port_id = ipv6_vip_port_id
if ipv6_vip_virsubnet_id is not None:
self.ipv6_vip_virsubnet_id = ipv6_vip_virsubnet_id
if l4_flavor_id is not None:
self.l4_flavor_id = l4_flavor_id
if l4_scale_flavor_id is not None:
self.l4_scale_flavor_id = l4_scale_flavor_id
if l7_flavor_id is not None:
self.l7_flavor_id = l7_flavor_id
if l7_scale_flavor_id is not None:
self.l7_scale_flavor_id = l7_scale_flavor_id
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if member_address is not None:
self.member_address = member_address
if member_device_id is not None:
self.member_device_id = member_device_id
if name is not None:
self.name = name
if operating_status is not None:
self.operating_status = operating_status
if page_reverse is not None:
self.page_reverse = page_reverse
if provisioning_status is not None:
self.provisioning_status = provisioning_status
if publicips is not None:
self.publicips = publicips
if vip_address is not None:
self.vip_address = vip_address
if vip_port_id is not None:
self.vip_port_id = vip_port_id
if vip_subnet_cidr_id is not None:
self.vip_subnet_cidr_id = vip_subnet_cidr_id
if vpc_id is not None:
self.vpc_id = vpc_id
@property
def admin_state_up(self):
"""Gets the admin_state_up of this ListLoadBalancersRequest.
负载均衡器的管理状态。只支持设定为true。
:return: The admin_state_up of this ListLoadBalancersRequest.
:rtype: bool
"""
return self._admin_state_up
@admin_state_up.setter
def admin_state_up(self, admin_state_up):
"""Sets the admin_state_up of this ListLoadBalancersRequest.
负载均衡器的管理状态。只支持设定为true。
:param admin_state_up: The admin_state_up of this ListLoadBalancersRequest.
:type: bool
"""
self._admin_state_up = admin_state_up
@property
def availability_zone_list(self):
"""Gets the availability_zone_list of this ListLoadBalancersRequest.
可用区。 注: 可用AZ的查询方式可用通过调用nova接口查询 /v2/{project_id}/os-availability-zone
:return: The availability_zone_list of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._availability_zone_list
@availability_zone_list.setter
def availability_zone_list(self, availability_zone_list):
"""Sets the availability_zone_list of this ListLoadBalancersRequest.
可用区。 注: 可用AZ的查询方式可用通过调用nova接口查询 /v2/{project_id}/os-availability-zone
:param availability_zone_list: The availability_zone_list of this ListLoadBalancersRequest.
:type: list[str]
"""
self._availability_zone_list = availability_zone_list
@property
def billing_info(self):
"""Gets the billing_info of this ListLoadBalancersRequest.
预留资源账单信息,默认为空表示按需计费, 非空为包周期。admin权限才能更新此字段。
:return: The billing_info of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._billing_info
@billing_info.setter
def billing_info(self, billing_info):
"""Sets the billing_info of this ListLoadBalancersRequest.
预留资源账单信息,默认为空表示按需计费, 非空为包周期。admin权限才能更新此字段。
:param billing_info: The billing_info of this ListLoadBalancersRequest.
:type: list[str]
"""
self._billing_info = billing_info
@property
def deletion_protection_enable(self):
"""Gets the deletion_protection_enable of this ListLoadBalancersRequest.
是否开启删除保护,false不开启,默认为空都查询
:return: The deletion_protection_enable of this ListLoadBalancersRequest.
:rtype: bool
"""
return self._deletion_protection_enable
@deletion_protection_enable.setter
def deletion_protection_enable(self, deletion_protection_enable):
"""Sets the deletion_protection_enable of this ListLoadBalancersRequest.
是否开启删除保护,false不开启,默认为空都查询
:param deletion_protection_enable: The deletion_protection_enable of this ListLoadBalancersRequest.
:type: bool
"""
self._deletion_protection_enable = deletion_protection_enable
@property
def description(self):
"""Gets the description of this ListLoadBalancersRequest.
负载均衡器的描述信息。
:return: The description of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ListLoadBalancersRequest.
负载均衡器的描述信息。
:param description: The description of this ListLoadBalancersRequest.
:type: list[str]
"""
self._description = description
@property
def eips(self):
"""Gets the eips of this ListLoadBalancersRequest.
公网ELB实例绑定EIP。 示例如下: \"eips\": [ { \"eip_id\": \"a6ded276-c88a-4c58-95e0-5b6d1d2297b3\", \"eip_address\": \"2001:db8:a583:86:cf24:5cc5:8117:6eaa\", \"ip_version\": 6 } ] 查询时指定:eips=eip_id=XXXX
:return: The eips of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._eips
@eips.setter
def eips(self, eips):
"""Sets the eips of this ListLoadBalancersRequest.
公网ELB实例绑定EIP。 示例如下: \"eips\": [ { \"eip_id\": \"a6ded276-c88a-4c58-95e0-5b6d1d2297b3\", \"eip_address\": \"2001:db8:a583:86:cf24:5cc5:8117:6eaa\", \"ip_version\": 6 } ] 查询时指定:eips=eip_id=XXXX
:param eips: The eips of this ListLoadBalancersRequest.
:type: list[str]
"""
self._eips = eips
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this ListLoadBalancersRequest.
企业项目ID。
:return: The enterprise_project_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this ListLoadBalancersRequest.
企业项目ID。
:param enterprise_project_id: The enterprise_project_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._enterprise_project_id = enterprise_project_id
@property
def guaranteed(self):
"""Gets the guaranteed of this ListLoadBalancersRequest.
共享型:false 性能保障型:true
:return: The guaranteed of this ListLoadBalancersRequest.
:rtype: bool
"""
return self._guaranteed
@guaranteed.setter
def guaranteed(self, guaranteed):
"""Sets the guaranteed of this ListLoadBalancersRequest.
共享型:false 性能保障型:true
:param guaranteed: The guaranteed of this ListLoadBalancersRequest.
:type: bool
"""
self._guaranteed = guaranteed
@property
def id(self):
"""Gets the id of this ListLoadBalancersRequest.
负载均衡器ID。
:return: The id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListLoadBalancersRequest.
负载均衡器ID。
:param id: The id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._id = id
@property
def ip_version(self):
"""Gets the ip_version of this ListLoadBalancersRequest.
IP版本信息。 取值范围:4和6 4:IPv4 6:IPv6
:return: The ip_version of this ListLoadBalancersRequest.
:rtype: list[int]
"""
return self._ip_version
@ip_version.setter
def ip_version(self, ip_version):
"""Sets the ip_version of this ListLoadBalancersRequest.
IP版本信息。 取值范围:4和6 4:IPv4 6:IPv6
:param ip_version: The ip_version of this ListLoadBalancersRequest.
:type: list[int]
"""
self._ip_version = ip_version
@property
def ipv6_vip_address(self):
"""Gets the ipv6_vip_address of this ListLoadBalancersRequest.
双栈实例对应v6的ip地址。
:return: The ipv6_vip_address of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._ipv6_vip_address
@ipv6_vip_address.setter
def ipv6_vip_address(self, ipv6_vip_address):
"""Sets the ipv6_vip_address of this ListLoadBalancersRequest.
双栈实例对应v6的ip地址。
:param ipv6_vip_address: The ipv6_vip_address of this ListLoadBalancersRequest.
:type: list[str]
"""
self._ipv6_vip_address = ipv6_vip_address
@property
def ipv6_vip_port_id(self):
"""Gets the ipv6_vip_port_id of this ListLoadBalancersRequest.
双栈实例对应v6的端口。
:return: The ipv6_vip_port_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._ipv6_vip_port_id
@ipv6_vip_port_id.setter
def ipv6_vip_port_id(self, ipv6_vip_port_id):
"""Sets the ipv6_vip_port_id of this ListLoadBalancersRequest.
双栈实例对应v6的端口。
:param ipv6_vip_port_id: The ipv6_vip_port_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._ipv6_vip_port_id = ipv6_vip_port_id
@property
def ipv6_vip_virsubnet_id(self):
"""Gets the ipv6_vip_virsubnet_id of this ListLoadBalancersRequest.
双栈实例对应v6的网络id 。 说明:vpc_id , vip_subnet_cidr_id, ipv6_vip_virsubnet_id不能同时为空。
:return: The ipv6_vip_virsubnet_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._ipv6_vip_virsubnet_id
@ipv6_vip_virsubnet_id.setter
def ipv6_vip_virsubnet_id(self, ipv6_vip_virsubnet_id):
"""Sets the ipv6_vip_virsubnet_id of this ListLoadBalancersRequest.
双栈实例对应v6的网络id 。 说明:vpc_id , vip_subnet_cidr_id, ipv6_vip_virsubnet_id不能同时为空。
:param ipv6_vip_virsubnet_id: The ipv6_vip_virsubnet_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._ipv6_vip_virsubnet_id = ipv6_vip_virsubnet_id
@property
def l4_flavor_id(self):
"""Gets the l4_flavor_id of this ListLoadBalancersRequest.
四层Flavor, 按需计费不填, 包周期由用户设置。
:return: The l4_flavor_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._l4_flavor_id
@l4_flavor_id.setter
def l4_flavor_id(self, l4_flavor_id):
"""Sets the l4_flavor_id of this ListLoadBalancersRequest.
四层Flavor, 按需计费不填, 包周期由用户设置。
:param l4_flavor_id: The l4_flavor_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._l4_flavor_id = l4_flavor_id
@property
def l4_scale_flavor_id(self):
"""Gets the l4_scale_flavor_id of this ListLoadBalancersRequest.
预留弹性flavor。
:return: The l4_scale_flavor_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._l4_scale_flavor_id
@l4_scale_flavor_id.setter
def l4_scale_flavor_id(self, l4_scale_flavor_id):
"""Sets the l4_scale_flavor_id of this ListLoadBalancersRequest.
预留弹性flavor。
:param l4_scale_flavor_id: The l4_scale_flavor_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._l4_scale_flavor_id = l4_scale_flavor_id
@property
def l7_flavor_id(self):
"""Gets the l7_flavor_id of this ListLoadBalancersRequest.
七层Flavor, 按需计费不填, 包周期由用户设置。
:return: The l7_flavor_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._l7_flavor_id
@l7_flavor_id.setter
def l7_flavor_id(self, l7_flavor_id):
"""Sets the l7_flavor_id of this ListLoadBalancersRequest.
七层Flavor, 按需计费不填, 包周期由用户设置。
:param l7_flavor_id: The l7_flavor_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._l7_flavor_id = l7_flavor_id
@property
def l7_scale_flavor_id(self):
"""Gets the l7_scale_flavor_id of this ListLoadBalancersRequest.
预留弹性flavor。
:return: The l7_scale_flavor_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._l7_scale_flavor_id
@l7_scale_flavor_id.setter
def l7_scale_flavor_id(self, l7_scale_flavor_id):
"""Sets the l7_scale_flavor_id of this ListLoadBalancersRequest.
预留弹性flavor。
:param l7_scale_flavor_id: The l7_scale_flavor_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._l7_scale_flavor_id = l7_scale_flavor_id
@property
def limit(self):
"""Gets the limit of this ListLoadBalancersRequest.
每页返回的个数。
:return: The limit of this ListLoadBalancersRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListLoadBalancersRequest.
每页返回的个数。
:param limit: The limit of this ListLoadBalancersRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListLoadBalancersRequest.
上一页最后一条记录的ID。 使用说明: - 必须与limit一起使用。 - 不指定时表示查询第一页。 - 该字段不允许为空或无效的ID。
:return: The marker of this ListLoadBalancersRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListLoadBalancersRequest.
上一页最后一条记录的ID。 使用说明: - 必须与limit一起使用。 - 不指定时表示查询第一页。 - 该字段不允许为空或无效的ID。
:param marker: The marker of this ListLoadBalancersRequest.
:type: str
"""
self._marker = marker
@property
def member_address(self):
"""Gets the member_address of this ListLoadBalancersRequest.
后端云服务器的IP地址。
:return: The member_address of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._member_address
@member_address.setter
def member_address(self, member_address):
"""Sets the member_address of this ListLoadBalancersRequest.
后端云服务器的IP地址。
:param member_address: The member_address of this ListLoadBalancersRequest.
:type: list[str]
"""
self._member_address = member_address
@property
def member_device_id(self):
"""Gets the member_device_id of this ListLoadBalancersRequest.
后端云服务器对应的弹性云服务器的ID。
:return: The member_device_id of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._member_device_id
@member_device_id.setter
def member_device_id(self, member_device_id):
"""Sets the member_device_id of this ListLoadBalancersRequest.
后端云服务器对应的弹性云服务器的ID。
:param member_device_id: The member_device_id of this ListLoadBalancersRequest.
:type: list[str]
"""
self._member_device_id = member_device_id
@property
def name(self):
"""Gets the name of this ListLoadBalancersRequest.
负载均衡器名称。
:return: The name of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListLoadBalancersRequest.
负载均衡器名称。
:param name: The name of this ListLoadBalancersRequest.
:type: list[str]
"""
self._name = name
@property
def operating_status(self):
"""Gets the operating_status of this ListLoadBalancersRequest.
负载均衡器的操作状态。 可以为:ONLINE、OFFLINE、DEGRADED、DISABLED或NO_MONITOR。 说明 该字段为预留字段,暂未启用。
:return: The operating_status of this ListLoadBalancersRequest.
:rtype: list[str]
"""
return self._operating_status
@operating_status.setter
def operating_status(self, operating_status):
"""Sets the operating_status of this ListLoadBalancersRequest.
负载均衡器的操作状态。 可以为:ONLINE、OFFLINE、DEGRADED、DISABLED或NO_MONITOR。 说明 该字段为预留字段,暂未启用。
:param operating_status: The operating_status of this ListLoadBalancersRequest.
:type: list[str]
"""
self._operating_status = operating_status
@property
def page_reverse(self):
"""Gets the page_reverse of this ListLoadBalancersRequest.
分页的顺序,true表示从后往前分页,false表示从前往后分页,默认为false。 使用说明:必须与limit一起使用。
:return: The page_reverse of this ListLoadBalancersRequest.
:rtype: bool
"""
return self._page_reverse
@page_reverse.setter
def page_reverse(self, page_reverse):
"""Sets the page_reverse of this ListLoadBalancersRequest.
分页的顺序,true表示从后往前分页,false表示从前往后分页,默认为false。 使用说明:必须与limit一起使用。
:param page_reverse: The page_reverse of this ListLoadBalancersRequest.
:type: bool
"""
self._page_reverse = page_reverse
    @property
    def provisioning_status(self):
        """Gets the provisioning_status of this ListLoadBalancersRequest.

        Provisioning status of the load balancer. One of ACTIVE,
        PENDING_CREATE or ERROR. Note: reserved field, not yet in use.

        :return: The provisioning_status of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._provisioning_status

    @provisioning_status.setter
    def provisioning_status(self, provisioning_status):
        """Sets the provisioning_status of this ListLoadBalancersRequest.

        Provisioning status of the load balancer. One of ACTIVE,
        PENDING_CREATE or ERROR. Note: reserved field, not yet in use.

        :param provisioning_status: The provisioning_status of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._provisioning_status = provisioning_status
    @property
    def publicips(self):
        """Gets the publicips of this ListLoadBalancersRequest.

        Public IPs, for example: \"publicips\": [ { \"publicip_id\": \"a6ded276-c88a-4c58-95e0-5b6d1d2297b3\", \"publicip_address\": \"2001:db8:a583:86:cf24:5cc5:8117:6eaa\", \"publicip_ip_version\": 6 } ] To filter by them, specify: publicips=publicip_id=XXXX,YYYY

        :return: The publicips of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._publicips

    @publicips.setter
    def publicips(self, publicips):
        """Sets the publicips of this ListLoadBalancersRequest.

        Public IPs, for example: \"publicips\": [ { \"publicip_id\": \"a6ded276-c88a-4c58-95e0-5b6d1d2297b3\", \"publicip_address\": \"2001:db8:a583:86:cf24:5cc5:8117:6eaa\", \"publicip_ip_version\": 6 } ] To filter by them, specify: publicips=publicip_id=XXXX,YYYY

        :param publicips: The publicips of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._publicips = publicips
    @property
    def vip_address(self):
        """Gets the vip_address of this ListLoadBalancersRequest.

        Virtual IP address of the load balancer.

        :return: The vip_address of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._vip_address

    @vip_address.setter
    def vip_address(self, vip_address):
        """Sets the vip_address of this ListLoadBalancersRequest.

        Virtual IP address of the load balancer.

        :param vip_address: The vip_address of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._vip_address = vip_address
    @property
    def vip_port_id(self):
        """Gets the vip_port_id of this ListLoadBalancersRequest.

        ID of the port bound to the load balancer's virtual IP.

        :return: The vip_port_id of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._vip_port_id

    @vip_port_id.setter
    def vip_port_id(self, vip_port_id):
        """Sets the vip_port_id of this ListLoadBalancersRequest.

        ID of the port bound to the load balancer's virtual IP.

        :param vip_port_id: The vip_port_id of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._vip_port_id = vip_port_id
    @property
    def vip_subnet_cidr_id(self):
        """Gets the vip_subnet_cidr_id of this ListLoadBalancersRequest.

        ID of the subnet where the load balancer resides (private network
        only). Note: vpc_id, vip_subnet_cidr_id and ipv6_vip_virsubnet_id
        cannot all be empty.

        :return: The vip_subnet_cidr_id of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._vip_subnet_cidr_id

    @vip_subnet_cidr_id.setter
    def vip_subnet_cidr_id(self, vip_subnet_cidr_id):
        """Sets the vip_subnet_cidr_id of this ListLoadBalancersRequest.

        ID of the subnet where the load balancer resides (private network
        only). Note: vpc_id, vip_subnet_cidr_id and ipv6_vip_virsubnet_id
        cannot all be empty.

        :param vip_subnet_cidr_id: The vip_subnet_cidr_id of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._vip_subnet_cidr_id = vip_subnet_cidr_id
    @property
    def vpc_id(self):
        """Gets the vpc_id of this ListLoadBalancersRequest.

        VPC the instance belongs to; if absent it is derived from
        vip_subnet_cidr_id. Note: vpc_id, vip_subnet_cidr_id and
        ipv6_vip_virsubnet_id cannot all be empty.

        :return: The vpc_id of this ListLoadBalancersRequest.
        :rtype: list[str]
        """
        return self._vpc_id

    @vpc_id.setter
    def vpc_id(self, vpc_id):
        """Sets the vpc_id of this ListLoadBalancersRequest.

        VPC the instance belongs to; if absent it is derived from
        vip_subnet_cidr_id. Note: vpc_id, vip_subnet_cidr_id and
        ipv6_vip_virsubnet_id cannot all be empty.

        :param vpc_id: The vpc_id of this ListLoadBalancersRequest.
        :type: list[str]
        """
        self._vpc_id = vpc_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE(review): simplejson rather than the stdlib json module --
        # presumably for its handling of the SDK model types; confirm.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            # Re-enable sys.setdefaultencoding (hidden after interpreter
            # start-up) so non-ASCII field text serializes under Python 2.
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        # Delegate to to_str() so repr/print show the serialized model.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListLoadBalancersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
c90209636c7817cc78f13b924ca3daa65ca95008 | 8f0f8b1eb80f203e96a7e1d9fead971354c223d4 | /tests/test_source.py | 0094ebe2656e8b8c8130e0ee928aa602f88596e0 | [] | no_license | wangilamain/news-articles | 949afc50bfd864696b949acc2cdcc0b41a7da774 | a1ce6a9cd6f65d4af77ba2b9cb017e20b6266caf | refs/heads/master | 2023-01-03T15:42:33.701963 | 2020-10-21T13:10:38 | 2020-10-21T13:10:38 | 294,770,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | import unittest
from app.models import Source
class SourceTest(unittest.TestCase):
    """Unit tests covering the behaviour of the Source model class."""

    def setUp(self):
        """Create a fresh Source fixture before each test runs."""
        self.new_source = Source('abc-news', 'ABC News')

    def test_instance(self):
        """The fixture created in setUp should be a Source instance."""
        self.assertTrue(isinstance(self.new_source, Source))
"wangilayng@gmail.com"
] | wangilayng@gmail.com |
9c6f41f489ce214acfea0123d11552e4e1d492a3 | 560ab81b2fd79a6531941abaec539a827561e3d8 | /Solutions/Day01.py | c1dc8a0748fcfc34eb3f3394c17a0684c6e6845d | [] | no_license | crashb/AoC-2017 | f0bb7be027624a4e3cf01614e0818c2fcaa4195f | 1eafd9f60c65583c6590063a951f6c1e74d32488 | refs/heads/master | 2021-08-30T19:16:08.031974 | 2017-12-19T04:22:34 | 2017-12-19T04:22:34 | 112,997,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # solution to http://adventofcode.com/2017/day/1
def solveCaptcha1(input):
    """Solve the first captcha: sum every digit that equals the digit
    immediately after it, treating the string as circular (the digit after
    the last one is the first one).

    Takes the puzzle input as a string of digits and returns the int sum.
    """
    total = 0
    length = len(input)
    for index, digit in enumerate(input):
        if digit == input[(index + 1) % length]:
            total += int(digit)
    return total
def solveCaptcha2(input):
    """Solve the second captcha: sum every digit that equals the digit
    halfway around the circular sequence.

    Takes the puzzle input as a string of digits and returns the int sum.
    """
    total = 0
    length = len(input)
    half = length // 2
    for index, digit in enumerate(input):
        if digit == input[(index + half) % length]:
            total += int(digit)
    return total
if __name__ == "__main__":
    # read file into string
    # The puzzle input may span several lines; strip trailing whitespace
    # and join the lines into one continuous digit string before solving.
    with open('Day01Input.txt', 'r') as myfile:
        input = ''.join(myfile.read().strip().split('\n'))
    captchaSolution1 = solveCaptcha1(input)
    print("First captcha solution: " + str(captchaSolution1))
    captchaSolution2 = solveCaptcha2(input)
    print("Second captcha solution: " + str(captchaSolution2))
| [
"snickerless1@shaw.ca"
] | snickerless1@shaw.ca |
01593c9ffc95662e33bc80059daecc2592dd829f | 08e26af5604fda61846c421d739c82ea0bd17271 | /product_account_purchase_sale/account_invoice.py | c25db1448d9f73d91fe3eab11331df7acb6e59cc | [] | no_license | germanponce/nishikawa_addons | 376342d6d45250eec85443abf4eb4f760256de85 | 765dd185272407175fbc14a8f4d702bf6e5e759d | refs/heads/master | 2021-01-25T04:09:07.391100 | 2014-07-14T14:57:21 | 2014-07-14T14:57:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,410 | py | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2010 moylop260 - http://www.hesatecnica.com.com/
# All Rights Reserved.
# info skype: german_442 email: (german.ponce@hesatecnica.com)
############################################################################
# Coded by: german_442 email: (german.ponce@hesatecnica.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import time
from datetime import datetime, date
from tools.translate import _
from openerp import SUPERUSER_ID
class account_invoice(osv.osv):
    # Extends the account.invoice model (same _name + _inherit) with a
    # link to the department that requested the purchase.
    _name = 'account.invoice'
    _inherit ='account.invoice'

    _columns = {
        # Department in charge of the purchase request (label/help kept in
        # Spanish because they are user-facing strings).
        'department_id': fields.many2one('hr.department', 'Departamento', help='Define el Departamento encargado de la Solicitud de la Compra' ),
    }
    _default = {
    }
account_invoice()
class account_invoice_line(osv.osv):
    # Extends account.invoice.line with an "analytic accounts required"
    # flag and overrides product_id_change to propagate that flag.
    _inherit ='account.invoice.line'

    _columns = {
        'analytics_accounts_required': fields.boolean('Cuentas Analiticas Requeridas') ,
    }

    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
        """On-change handler for the product field of an invoice line.

        Resolves the income/expense account, taxes, unit price, UoM and
        description for the selected product, then (custom part) marks the
        line when the product requires analytic accounts.
        Returns the usual on-change dict: {'value': ..., 'domain': ...}.
        """
        # NOTE(review): `value` is assigned but never used below.
        value = {}
        if context is None:
            context = {}
        company_id = company_id if company_id != None else context.get('company_id',False)
        # Work on a copy so the caller's context is not mutated.
        context = dict(context)
        context.update({'company_id': company_id, 'force_company': company_id})
        if not partner_id:
            raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
        if not product:
            # No product selected: return an empty/zeroed on-change result.
            if type in ('in_invoice', 'in_refund'):
                return {'value': {}, 'domain':{'product_uom':[]}}
            else:
                return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
        part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        fpos_obj = self.pool.get('account.fiscal.position')
        fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False

        # Use the partner's language for translated product fields.
        if part.lang:
            context.update({'lang': part.lang})
        result = {}
        res = self.pool.get('product.product').browse(cr, uid, product, context=context)

        # Pick the income account for customer documents, the expense
        # account otherwise, falling back to the product category account.
        if type in ('out_invoice','out_refund'):
            a = res.property_account_income.id
            if not a:
                a = res.categ_id.property_account_income_categ.id
        else:
            a = res.property_account_expense.id
            if not a:
                a = res.categ_id.property_account_expense_categ.id
        # Remap the account through the fiscal position, if any.
        a = fpos_obj.map_account(cr, uid, fpos, a)
        if a:
            result['account_id'] = a

        # Taxes: product taxes first, else the taxes of the account found.
        if type in ('out_invoice', 'out_refund'):
            taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
        else:
            taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
        tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
        if type in ('in_invoice', 'in_refund'):
            result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
        else:
            result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
        result['name'] = res.partner_ref

        result['uos_id'] = uom_id or res.uom_id.id
        if res.description:
            result['name'] += '\n'+res.description

        domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}

        res_final = {'value':result, 'domain':domain}

        if not company_id or not currency_id:
            return res_final

        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)

        # Currency differs from the company currency: convert the price
        # and, if a different UoM was chosen, rescale it accordingly.
        if company.currency_id.id != currency.id:
            if type in ('in_invoice', 'in_refund'):
                res_final['value']['price_unit'] = res.standard_price
            new_price = res_final['value']['price_unit'] * currency.rate
            res_final['value']['price_unit'] = new_price

            if result['uos_id'] and result['uos_id'] != res.uom_id.id:
                # NOTE(review): `selected_uom` is fetched but never used.
                selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
                new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
                res_final['value']['price_unit'] = new_price

        #### Check whether the product requires analytic accounts
        prod_obj = self.pool.get('product.product')
        prod_b = prod_obj.browse(cr, uid, [product], context=None)[0]
        if prod_b.analytics_accounts_required:
            res_final['value'].update({'analytics_accounts_required':True})
        return res_final

account_invoice_line()
class account_account_template(osv.osv):
    _name = "account.account.template"
    _inherit = "account.account.template"

    _columns = {
        # NOTE(review): overrides 'name' -- presumably to widen it to 256
        # chars and make it translatable; confirm against the base model.
        'name': fields.char('Name', size=256, required=True, select=True, translate=True),
    }
account_account_template()
class account_account(osv.osv):
    _name = "account.account"
    _inherit = "account.account"

    _columns = {
        # NOTE(review): same override as account_account_template above --
        # presumably widening 'name' and enabling translation; confirm.
        'name': fields.char('Name', size=256, required=True, select=True, translate=True),
    }
account_account()
"german_442@hotmail.com"
] | german_442@hotmail.com |
c330b90db6252be01fbc219bc9c24d5fa11bc887 | dc8ddd8f11893e4e4488f5f7ac910342f8654670 | /tests/providers/google/cloud/operators/test_datafusion.py | 466f67061d4d528790d9a970ccb2d07d4dbf3dca | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | GregKarabinos/airflow | 2f737fd51c331eb8347d9e5415034522b6e6c704 | 606b697ebfcc188aa5fe4040c7163fbe2a004ce9 | refs/heads/main | 2023-07-14T21:39:44.703132 | 2021-08-28T01:57:40 | 2021-08-28T01:57:40 | 400,819,849 | 0 | 0 | Apache-2.0 | 2021-08-28T14:59:28 | 2021-08-28T14:59:27 | null | UTF-8 | Python | false | false | 10,129 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from airflow import DAG
from airflow.providers.google.cloud.hooks.datafusion import SUCCESS_STATES, PipelineStates
from airflow.providers.google.cloud.operators.datafusion import (
CloudDataFusionCreateInstanceOperator,
CloudDataFusionCreatePipelineOperator,
CloudDataFusionDeleteInstanceOperator,
CloudDataFusionDeletePipelineOperator,
CloudDataFusionGetInstanceOperator,
CloudDataFusionListPipelinesOperator,
CloudDataFusionRestartInstanceOperator,
CloudDataFusionStartPipelineOperator,
CloudDataFusionStopPipelineOperator,
CloudDataFusionUpdateInstanceOperator,
)
# Patch target for the DataFusionHook used by every operator under test.
HOOK_STR = "airflow.providers.google.cloud.operators.datafusion.DataFusionHook"

# Shared fixture values reused across all test classes below.
LOCATION = "test-location"
INSTANCE_NAME = "airflow-test-instance"
INSTANCE = {"type": "BASIC", "displayName": INSTANCE_NAME}
PROJECT_ID = "test_project_id"
PIPELINE_NAME = "shrubberyPipeline"
PIPELINE = {"test": "pipeline"}
INSTANCE_URL = "http://datafusion.instance.com"
NAMESPACE = "TEST_NAMESPACE"
RUNTIME_ARGS = {"arg1": "a", "arg2": "b"}
class TestCloudDataFusionUpdateInstanceOperator:
    """Tests for CloudDataFusionUpdateInstanceOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should patch the instance and wait for the operation."""
        # Fixed local variable name (was the typo `update_maks`).
        update_mask = "instance.name"
        op = CloudDataFusionUpdateInstanceOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            instance=INSTANCE,
            update_mask=update_mask,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})

        mock_hook.return_value.patch_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME,
            instance=INSTANCE,
            update_mask=update_mask,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        assert mock_hook.return_value.wait_for_operation.call_count == 1
class TestCloudDataFusionRestartInstanceOperator:
    """Tests for CloudDataFusionRestartInstanceOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should restart the instance and wait for the operation."""
        op = CloudDataFusionRestartInstanceOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})

        mock_hook.return_value.restart_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )
        assert mock_hook.return_value.wait_for_operation.call_count == 1
class TestCloudDataFusionCreateInstanceOperator:
    """Tests for CloudDataFusionCreateInstanceOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should create the instance and wait for the operation."""
        op = CloudDataFusionCreateInstanceOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            instance=INSTANCE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})

        mock_hook.return_value.create_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME,
            instance=INSTANCE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        assert mock_hook.return_value.wait_for_operation.call_count == 1
class TestCloudDataFusionDeleteInstanceOperator:
    """Tests for CloudDataFusionDeleteInstanceOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should delete the instance and wait for the operation."""
        op = CloudDataFusionDeleteInstanceOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})

        mock_hook.return_value.delete_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )
        assert mock_hook.return_value.wait_for_operation.call_count == 1
class TestCloudDataFusionGetInstanceOperator:
    """Tests for CloudDataFusionGetInstanceOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should fetch the instance through the hook."""
        op = CloudDataFusionGetInstanceOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})

        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )
class TestCloudDataFusionCreatePipelineOperator:
    """Tests for CloudDataFusionCreatePipelineOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should resolve the instance URL and create the pipeline."""
        mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
        op = CloudDataFusionCreatePipelineOperator(
            task_id="test_tasks",
            pipeline_name=PIPELINE_NAME,
            pipeline=PIPELINE,
            instance_name=INSTANCE_NAME,
            namespace=NAMESPACE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})
        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )

        mock_hook.return_value.create_pipeline.assert_called_once_with(
            instance_url=INSTANCE_URL,
            pipeline_name=PIPELINE_NAME,
            pipeline=PIPELINE,
            namespace=NAMESPACE,
        )
class TestCloudDataFusionDeletePipelineOperator:
    """Tests for CloudDataFusionDeletePipelineOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should resolve the instance URL and delete the pipeline."""
        mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
        op = CloudDataFusionDeletePipelineOperator(
            task_id="test_tasks",
            pipeline_name=PIPELINE_NAME,
            version_id="1.12",
            instance_name=INSTANCE_NAME,
            namespace=NAMESPACE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})
        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )

        mock_hook.return_value.delete_pipeline.assert_called_once_with(
            instance_url=INSTANCE_URL,
            pipeline_name=PIPELINE_NAME,
            namespace=NAMESPACE,
            version_id="1.12",
        )
class TestCloudDataFusionStartPipelineOperator:
    """Tests for CloudDataFusionStartPipelineOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should start the pipeline and wait until it is running."""
        PIPELINE_ID = "test_pipeline_id"
        mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
        mock_hook.return_value.start_pipeline.return_value = PIPELINE_ID

        op = CloudDataFusionStartPipelineOperator(
            task_id="test_task",
            pipeline_name=PIPELINE_NAME,
            instance_name=INSTANCE_NAME,
            namespace=NAMESPACE,
            location=LOCATION,
            project_id=PROJECT_ID,
            runtime_args=RUNTIME_ARGS,
        )
        # The operator needs a DAG at execute time; a MagicMock suffices.
        op.dag = mock.MagicMock(spec=DAG, task_dict={}, dag_id="test")

        op.execute({})
        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )

        mock_hook.return_value.start_pipeline.assert_called_once_with(
            instance_url=INSTANCE_URL,
            pipeline_name=PIPELINE_NAME,
            namespace=NAMESPACE,
            runtime_args=RUNTIME_ARGS,
        )

        mock_hook.return_value.wait_for_pipeline_state.assert_called_once_with(
            success_states=SUCCESS_STATES + [PipelineStates.RUNNING],
            pipeline_id=PIPELINE_ID,
            pipeline_name=PIPELINE_NAME,
            namespace=NAMESPACE,
            instance_url=INSTANCE_URL,
            timeout=300,
        )
class TestCloudDataFusionStopPipelineOperator:
    """Tests for CloudDataFusionStopPipelineOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should resolve the instance URL and stop the pipeline."""
        mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
        op = CloudDataFusionStopPipelineOperator(
            task_id="test_tasks",
            pipeline_name=PIPELINE_NAME,
            instance_name=INSTANCE_NAME,
            namespace=NAMESPACE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})
        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )

        mock_hook.return_value.stop_pipeline.assert_called_once_with(
            instance_url=INSTANCE_URL, pipeline_name=PIPELINE_NAME, namespace=NAMESPACE
        )
class TestCloudDataFusionListPipelinesOperator:
    """Tests for CloudDataFusionListPipelinesOperator."""

    @mock.patch(HOOK_STR)
    def test_execute(self, mock_hook):
        """execute() should list pipelines filtered by artifact name/version."""
        artifact_version = "artifact_version"
        artifact_name = "artifact_name"
        mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
        op = CloudDataFusionListPipelinesOperator(
            task_id="test_tasks",
            instance_name=INSTANCE_NAME,
            artifact_version=artifact_version,
            artifact_name=artifact_name,
            namespace=NAMESPACE,
            location=LOCATION,
            project_id=PROJECT_ID,
        )
        op.execute({})
        mock_hook.return_value.get_instance.assert_called_once_with(
            instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
        )

        mock_hook.return_value.list_pipelines.assert_called_once_with(
            instance_url=INSTANCE_URL,
            namespace=NAMESPACE,
            artifact_version=artifact_version,
            artifact_name=artifact_name,
        )
| [
"noreply@github.com"
] | GregKarabinos.noreply@github.com |
191e15031e0e54e43a73695692a2d3e896c4f570 | 83c8e5ef7daa8cdaedf2715e2dbb8e9284e6f143 | /IQSAR/qdb.py | ea9c1c86f28cea5b4d49f3ee5781c8b60f5d6ef9 | [] | no_license | rnaimehaom/iqsar | 9c0cb63faaac5652bdb1898a6e02f05b71e6bbb6 | 0da80a54ca44d15f74de0b4472bee9186ba21cff | refs/heads/master | 2023-03-19T02:33:51.277921 | 2019-04-26T21:11:49 | 2019-04-26T21:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,473 | py | import os
import pandas as pd
import xml.etree.ElementTree as ET
import urllib
import copy
'''Contains a qdbrep class. Declare using IQSAR.qdb.qdbrep(/absolute/path/to/unzipped/qsar-db/folder/) and perform getdescs, getyvals, getinchis, getcas functions on that object.'''
class qdbrep(object):
    '''Represents an unzipped QSAR-DB repository.

    Declare using IQSAR.qdb.qdbrep('/absolute/path/to/unzipped/qsar-db/folder/')
    and perform getdescs, getyvals, getcompounds and getmol operations on
    that object.
    '''
    def __init__(self, dir):
        # dir: path to the unzipped QSAR-DB folder (trailing slash expected,
        # since the methods below concatenate subfolder names onto it).
        self.dir = dir

    def _getsub(self):
        '''Return the names of all immediate subfolders of the QSAR-DB folder.'''
        return [name for name in os.listdir(self.dir)
                if os.path.isdir(os.path.join(self.dir, name))]

    def getdescs(self):
        '''Collate the data from the "descriptors" folder into a pandas
        DataFrame (one column set per descriptor subfolder). Use the
        DataFrame's .to_csv() method to export to csv.

        :raises IOError: if the QSAR-DB has no "descriptors" folder.
        '''
        if "descriptors" in self._getsub():
            descfolder = self.dir + "descriptors/"
            for root, dirs, files in os.walk(descfolder):
                if not dirs:
                    pass
                else:
                    dfs = []
                    for directorynum in range(len(dirs)):
                        dfs.append(pd.read_table(descfolder + str(dirs[directorynum]) + "/values", index_col=0))
                    return pd.concat(dfs, axis=1)
        else:
            raise IOError("No descriptors folder present in this particular QSAR-DB!")

    def getyvals(self):
        '''Collect the activity values from the "properties" folder into a
        pandas DataFrame (columns concatenated when several property
        subfolders exist).

        :raises IOError: if the QSAR-DB has no "properties" folder.
        '''
        if "properties" in self._getsub():
            propfolder = self.dir + "properties/"
            for root, dirs, files in os.walk(propfolder):
                if not dirs:
                    pass
                else:
                    if len(dirs) == 1:
                        return pd.read_table(propfolder + str(dirs[0]) + "/values", index_col=0)
                    elif len(dirs) > 1:
                        saffa = []
                        for directorynum in range(len(dirs)):
                            saffa.append(pd.read_table(propfolder + str(dirs[directorynum]) + "/values", index_col=0))
                        return pd.concat(saffa, axis=1)
        else:
            raise IOError("No properties folder present in this particular QSAR-DB!")

    def getcompounds(self):
        '''Extract compounds/compounds.xml into a pandas DataFrame indexed
        by compound Id (rows are compounds, columns the XML child tags).

        NOTE(review): unlike getdescs/getyvals, a missing "compounds"
        folder silently returns None instead of raising IOError.
        '''
        if "compounds" in self._getsub():
            xmlfile = self.dir + "compounds/compounds.xml"
            if xmlfile.endswith(".xml"):
                tree = ET.parse(xmlfile)
                root = tree.getroot()
                childs = []
                for child in root:
                    # Tags arrive as "{namespace}Name"; keep only the part
                    # after the closing brace.
                    tindex = [ele.tag for ele in child]
                    for n, i in enumerate(tindex):
                        junk, notjunk = i.split("}")
                        tindex[n] = notjunk
                    childinfo = pd.Series([ele.text for ele in child], index=tindex)
                    childs.append(childinfo)
                mas = pd.concat(childs, axis=1)
                mas2 = mas.T
                return mas2.set_index(keys="Id", drop=True)
            else:
                raise TypeError("Input file must be of type XML!")

    def getmol(self, folderpath):
        '''Download a .mol file for every compound that has an InChI from
        the NIST webbook into folderpath (a string such as
        "/absolute/path/to/folder/"). This relies on getcompounds(). An
        ambiguous InChI may match more than one NIST entry, so check the
        folder and the printed output.

        :raises TypeError: if folderpath is not a string.
        '''
        nisturl = "http://webbook.nist.gov/cgi/cbook.cgi?InChIFile="
        inchiseries = self.getcompounds()["InChI"]
        if type(folderpath) == str:
            for i in inchiseries.index:
                # Missing InChI cells come back from pandas as float NaN.
                if type(inchiseries[i]) == float:
                    print(str(i) + ".mol not downloaded")
                else:
                    urllib.urlretrieve(nisturl + inchiseries[i], folderpath + str(i) + ".mol")
        else:
            raise TypeError("Type of folderpath must be a string!")
| [
"lisagbang@gmail.com"
] | lisagbang@gmail.com |
cda22f95a8b8f123306a72b5e3853c43c639b064 | 9969462a4c2697c6dd8eb1fa9449e64383bb8cfa | /D02/1966.py | e22aa6ae7e30d7679798c7049bee6fa9de3145ff | [] | no_license | daeungdaeung/SWEA | 5ab97881a1d5acfec431c54425b1d0a20b6025b2 | bf6cd36547aa8083cc9910680ae54a569ec556f9 | refs/heads/main | 2023-03-16T01:22:40.903755 | 2021-03-14T05:55:33 | 2021-03-14T05:55:33 | 331,610,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # -*- coding: utf-8 -*-
T = int(input())
# Multiple test cases are given, so handle each one in turn.
for test_case in range(1, T + 1):
    # N (the count of numbers) is read but only to consume its input line;
    # the values themselves come from the next line.
    N = input()
    numbers = sorted(list(map(int, input().split())))
    numbers = ' '.join(list(map(str, numbers)))
    print('#{} {}'.format(test_case, numbers))
| [
"kangdy0131@gmail.com"
] | kangdy0131@gmail.com |
7ffb1a1bb81a1616c2e51cff118ea72be2f444b9 | 17ca4f13eacc6844c592a5cecbebf4f243613097 | /scraping.py | 2aceea5be614489dfd89ebb1337f47206cce0240 | [
"Apache-2.0"
] | permissive | Katsutoshi-Inuga/polar_dict_test | 84bcf64b225697619784ea9bb931bc3254a3e1a0 | 06a4df059354a97de773d8682d0b7d648a9ad10c | refs/heads/master | 2020-04-26T12:27:07.458046 | 2019-03-03T08:41:20 | 2019-03-03T08:41:20 | 173,549,685 | 0 | 0 | Apache-2.0 | 2019-03-03T08:41:21 | 2019-03-03T08:30:39 | null | UTF-8 | Python | false | false | 2,905 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 02:04:20 2019
@author: hoge
"""
'''URLを扱うモジュール'''
import urllib.request
import re
import time
import pandas as pd
from bs4 import BeautifulSoup
# Matches movie detail paths such as /movie/<title>/123456/ (6-digit id).
reexp_ptn=r'^/movie/.*/[0-9]{6}/$'
ptn = re.compile(reexp_ptn)
# Collect title/URL pairs from the first page of https://movies.yahoo.co.jp/movie/
def scrape(url):
    """Return a list of [title, relative_url] pairs for every anchor on
    the page whose href matches the movie-detail pattern."""
    collected = []
    response = urllib.request.urlopen(url)
    soup = BeautifulSoup(response.read(), "html.parser")
    for anchor in soup.find_all("a"):
        href = anchor.get("href")
        title = anchor.get("title")
        if href is None or title is None:
            continue
        if ptn.match(href):
            collected.append([title, href])
    return collected
# Extract the star-rating score from one review element.
def selectStartRating(li):
    # The score is encoded in the class list,
    # e.g. <i class="star-actived rate-100"> -> 100.
    star_node = li.find("i", class_="star-actived")
    _, rate_class = star_node.attrs.get('class')
    return int(rate_class.replace("rate-", ""))
# Extract the review title from one review element.
def selectCommentTitle(li):
    # The title lives in <span class="color-sub text-small">.
    title_node = li.find("span", class_="color-sub text-small")
    return title_node.text
# Extract the review body text from one review element.
def selectCommentMain(li):
    # Body lives in <p class="text-xsmall text-overflow clear">.
    body_node = li.find("p", class_="text-xsmall text-overflow clear")
    # Spoiler-flagged reviews carry no body, so the node can be absent;
    # return an empty string in that case.
    if body_node is None:
        return ""
    return body_node.text
# Collect the review entries from one movie's review page.
def getReviewContents(title, url):
    """Return a list of [title, star_rating, comment_title, comment_main]
    entries scraped from the review page at ``url``."""
    reviews = []
    page = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(page, "html.parser")
    review_list = soup.find(id="revwlst")
    items = review_list.findAll("a", class_="listview__element--right-icon")
    stripped_title = title.strip()
    for item in items:
        reviews.append([
            stripped_title,
            selectStartRating(item),
            selectCommentTitle(item).strip(),
            selectCommentMain(item).strip(),
        ])
    return reviews
if __name__ == '__main__':
    '''メイン処理'''
    # Main flow: scrape the movie list page, then fetch each movie's
    # review page (3 s apart to be polite to the server) and pickle
    # all collected reviews.
    url="https://movies.yahoo.co.jp/movie/"
    movie_page_url="https://movies.yahoo.co.jp"
    movie_datas = scrape(url)
    reviews=[]
    for movie_data in movie_datas:
        # movie_data is [title, relative_url]; the relative URL matched
        # the pattern ending in '/', so appending "review" forms the
        # review-page URL.
        movie_by_title_url = movie_page_url + movie_data[1] + "review"
        time.sleep(3)
        reviews.extend(getReviewContents(movie_data[0],movie_by_title_url))
    print(reviews[0])
    df = pd.DataFrame(reviews)
    df.to_pickle("./movie_rev.pkl")
| [
"katsutoshi-inuga@nova-system.com"
] | katsutoshi-inuga@nova-system.com |
d9e074ff16cfed5d8bcc22f98c814cef1a3ee5b2 | 151ef5a1d157d51577de038f4d8f00033728d33c | /csv_reader/csv_reader.py | a68adb6c91b2007e7427d5c3d33523148bb10cdf | [] | no_license | AlexJon93/QuickText | 2a3d296b75c701d5862b822287b2bc1f52adb37b | afdee3b6413446a121669920d7a4c5b500ad07f9 | refs/heads/master | 2020-04-27T23:47:59.490657 | 2019-03-10T07:17:46 | 2019-03-10T07:17:46 | 174,788,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | from models import member
import csv
import re
def csv_parse(csvfile):
    """Parse an open CSV file of member rows into a list of Member objects.

    Rows failing validation are skipped. If a required column is missing,
    an error message is printed and None is returned.
    """
    members = []
    for row in csv.DictReader(csvfile):
        try:
            if not csv_validate_items(row):
                continue
            name_parts = row['name'].split()
            first_name = name_parts[0]
            surname = ''.join(name_parts[1:])
            members.append(member.Member(first_name, surname, row['student number'], row['phone number'], row['email']))
        except KeyError:
            print('Given csv file does not contain one of the following fields: name, student number, phone number, or email')
            return
    return members
def csv_validate_items(member):
    """Validate one CSV row dict; rows that do not opt in to calls fail
    immediately, otherwise every field must pass its validator."""
    if not csv_validate_call(member['tocall']):
        return False
    return (
        csv_validate_name(member['name'])
        and csv_validate_student_number(member['student number'])
        and csv_validate_phone(member['phone number'])
        and csv_validate_email(member['email'])
    )
def csv_validate_name(name):
    """A name is valid when it is purely alphabetic once spaces are removed."""
    collapsed = name.replace(' ', '')
    return collapsed.isalpha()
def csv_validate_phone(number):
    """A phone number is valid when, ignoring spaces, it is exactly 10
    digits beginning with '04'."""
    digits = number.replace(' ', '')
    return digits.isdigit() and digits.startswith('04') and len(digits) == 10
def csv_validate_student_number(number):
    """A student number is valid when, ignoring spaces, it is all digits,
    optionally prefixed by a single 's' or 'S'.

    Fixed: an empty value no longer raises IndexError (from indexing
    number[0]); it is simply invalid, matching the other validators.
    """
    cleaned = number.replace(' ', '')
    if not cleaned:
        return False
    return cleaned.isdigit() or (cleaned[0] in ('s', 'S') and cleaned[1:].isdigit())
def csv_validate_email(email):
    """Match a minimal something@something.something email shape.

    Returns the re.Match object (truthy) on success, None otherwise.
    """
    pattern = r'[^@]+@[^@]+\.[^@]+'
    return re.match(pattern, email)
def csv_validate_call(call):
    """True when the 'tocall' field opts in: 'yes'/'y' (any case) or '1'."""
    normalized = call.lower()
    return normalized == 'yes' or normalized == 'y' or call == '1'
"Alex.Jon.Jarvis@Gmail.com"
] | Alex.Jon.Jarvis@Gmail.com |
e04c92677cbb155c256f71a8737c2bc35ac2456a | 19212a25f5400546e0bbb05a1f225de3c3168dac | /venv/bin/f2py | 40b8b74e1fe6939c08bacab163fd5a66dcc48c5c | [] | no_license | RohanBhirangi/Backpropagation | 743ed0868b8c758b393dbb8f7b22102deea9a51d | ec335dbd56185273c1248a30a74b8eeef1152328 | refs/heads/master | 2021-09-06T03:41:59.236137 | 2018-02-02T05:22:01 | 2018-02-02T05:22:01 | 115,379,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | #!/Users/rohanbhirangi/Desktop/NeuralProject/venv/bin/python3
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| [
"rohanbhirangi@Rohans-MacBook-Pro.local"
] | rohanbhirangi@Rohans-MacBook-Pro.local | |
8ad7c9fac6126beb2635e088add4db030f20f835 | 8674907196f3a1d115291f165e691ad88a1cd205 | /train_Resnet.py | 8ee33885ee840f0ce9d7a297bf50e26295d84300 | [] | no_license | Ziqi-Zhang-CU/IEOR4720 | c03f0d15ca987e650fe137a0056ce3f28b0334a3 | 2709a70277ba9b3af47dfbee65ee41bb294d6ae1 | refs/heads/master | 2020-04-03T08:10:03.770339 | 2019-01-01T05:55:07 | 2019-01-01T05:55:07 | 155,124,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,301 | py | import sys
import os
import warnings
from model_Resnet import *
from utils import save_checkpoint
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
import numpy as np
import argparse
import json
import cv2
import dataset
import time
# command line interface
# https://docs.python.org/2/library/argparse.html
parser = argparse.ArgumentParser(description='PyTorch CSRNet')
parser.add_argument('train_json', metavar='TRAIN',
help='path to train json')
parser.add_argument('test_json', metavar='TEST',
help='path to test json')
parser.add_argument('--pre', '-p', metavar='PRETRAINED', default=None,type=str,
help='path to the pretrained model')
parser.add_argument('gpu',metavar='GPU', type=str,
help='GPU id to use.')
parser.add_argument('task',metavar='TASK', type=str,
help='task id to use.')
def main():
#save dat
save_mae = [];
save_train = [];
global args,best_prec1
best_prec1 = 1e6
args = parser.parse_args()
args.original_lr = 1e-7
args.lr = 1e-7 #learning rate
args.batch_size = 1
args.momentum = 0.95
args.decay = 5*1e-4
args.start_epoch = 0
#args.epochs = 400
args.epochs =200
args.steps = [-1,1,100,150]
args.scales = [1,1,1,1]
args.workers = 4
args.seed = time.time()
args.print_freq = 30
#load train list and val list
with open(args.train_json, 'r') as outfile:
train_list = json.load(outfile)
with open(args.test_json, 'r') as outfile:
val_list = json.load(outfile)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
torch.cuda.manual_seed(args.seed)
#set model
# ResNet 34
#model = ResNet(BasicBlock, [3, 4, 6, 3])
# ResNet18
model = ResNet(BasicBlock, [2, 2, 2, 2])
model = model.cuda()
# train loss function
criterion = nn.MSELoss(size_average=False).cuda()
#SGD
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.decay)
if args.pre: #load checkpoint
if os.path.isfile(args.pre): #if there's already pre then load
print("=> loading checkpoint '{}'".format(args.pre))
checkpoint = torch.load(args.pre)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.pre, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.pre))
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
train(train_list, model, criterion, optimizer, epoch, save_train)
prec1 = validate(val_list, model, criterion)
save_mae.append(prec1) #save MAE
is_best = prec1 < best_prec1
best_prec1 = min(prec1, best_prec1)
print(' * best MAE {mae:.3f} '
.format(mae=best_prec1))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.pre,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best,args.task)
save_mae = np.array(save_mae);
save_train = np.array(save_train);
np.save('save_mae.npy', save_mae)
np.save('save_train.npy',save_train)
#train and update weights
def train(train_list, model, criterion, optimizer, epoch, save_train):
losses = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
#train_loader loads data
train_loader = torch.utils.data.DataLoader(
# the dataset.list data function defined previously
dataset.listDataset(train_list,
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
train=True, #4 workers
seen=model.seen, # seen is set to be 0
batch_size=args.batch_size,#batch size = 1
num_workers=args.workers),
batch_size=args.batch_size)
print('epoch %d, processed %d samples, lr %.10f' % (epoch, epoch * len(train_loader.dataset), args.lr))
model.train()
end = time.time()
for i,(img, target)in enumerate(train_loader):
data_time.update(time.time() - end) # how much time to load dat a
img = img.cuda()
img = Variable(img)
output = model(img)
target = target.type(torch.FloatTensor).unsqueeze(0).cuda()
target = Variable(target)
loss = criterion(output, target)
losses.update(loss.item(), img.size(0))
optimizer.zero_grad() #backward() function accumulates gradients
loss.backward()
optimizer.step() #a parameter update based on the current gradient
batch_time.update(time.time() - end) # how much time to process this batch
end = time.time()
if i % args.print_freq == 0: #print every 30
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
save_train.append(losses.avg)
def validate(val_list, model, criterion):
print ('begin test')
# load val data
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(val_list,
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]), train=False),
batch_size=args.batch_size)
model.eval()
mae = 0
for i,(img, target) in enumerate(test_loader):
img = img.cuda()
img = Variable(img)
output = model(img)
mae += abs(output.data.sum()-target.sum().type(torch.FloatTensor).cuda())
mae = mae/len(test_loader)
print(' * MAE {mae:.3f} '
.format(mae=mae))
return mae
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
'''
args.lr = args.original_lr * (0.75 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
'''
args.lr = args.original_lr
for i in range(len(args.steps)): #i = 0,1,2,3
scale = args.scales[i] if i < len(args.scales) else 1
# we could change scale in the future
#i<1 then sacale = 1 else 1
if epoch >= args.steps[i]:
args.lr = args.lr * scale
if epoch == args.steps[i]:
break # do nothing
else: # epoch < step i do nothing
break
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | Ziqi-Zhang-CU.noreply@github.com |
cd9330268acccd948ebe7171c712f4c7c268bfc7 | 68bebde9493864886ce123a11870934614e892e6 | /async_alert_test.py | 67021ae2ed89df8064b4dcd813c361f0d8477b28 | [] | no_license | lovinhence/CryptoTools | cc82e680b901d16c0b77324a17956e95eb359992 | f2b18fd13a376b6498375a8d1c835d600d329445 | refs/heads/master | 2021-09-13T15:59:22.142833 | 2018-05-01T23:42:18 | 2018-05-01T23:42:18 | 107,843,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | import time
# alert_list = [("test1", 5), ("test2", 5)]
# while True:
# for i in range(len(alert_list)):
# alert_str, ttl = alert_list[i]
# ttl -= 1
# if ttl > 0:
# alert_list[i] = (alert_str, ttl)
# else:
#
# print(alert_list)
# time.sleep(1)
stack = [1,2,3,4,5]
if len(stack) >= 5:
stack.pop()
stack.append(6)
print(stack)
| [
"lovinhence@gmail.com"
] | lovinhence@gmail.com |
5c4f01d6b99f7e424027e3f6fed94cbf10e9662a | 0f4e7c49cf4de91f5befaa4cd881c7f200ca598d | /blog/migrations/0001_initial.py | 808f4b6edfe71f284a7afff2778b92f9b4b02720 | [] | no_license | Esiguas/my-first-blog | 8eca070defd5f076a98b8dc28e637a4f1a1d6e64 | 6b94352ce4746b49b9787a8caee979c2d6d0bd6c | refs/heads/master | 2020-05-25T00:49:38.454781 | 2019-05-23T04:23:30 | 2019-05-23T04:23:30 | 187,296,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.0.13 on 2019-05-19 23:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"edwarjaviersiguasaquije@gmail.com"
] | edwarjaviersiguasaquije@gmail.com |
64163386b58731800648bf773553e91cafbf6760 | cf1373f70925a7602bcb8bf7da0f642e5348ad2d | /spiders/spider_comment.py | 6994611814c4a419ff1f83dd698c744b802bcc68 | [] | no_license | Mrsspider/xiecheng_spider_2018_03_02 | 2ec8227efed9bf4ec9dbdf055c31c7344286ad78 | 8c8f8aac98fe41e3c4edbff45f87015dcbe58190 | refs/heads/master | 2020-03-15T02:47:49.761898 | 2018-05-03T01:43:21 | 2018-05-03T01:43:21 | 131,926,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import pymysql
from functings.spider_function import *
def main(url):
html = gethtml(url)
comment_id = re.findall('/(\d+)\.html',url)[0]
result = htmlparser(html)
title = result.xpath('//h1/text()')[0].strip()
all_comment = result.xpath('//a[@data-anchor="yhdp"]/text()')[0].strip()
max_page = re.sub('[^\d+]','',all_comment)
if not max_page:
max_page = 0
max_page = int(max_page)//5+1
L = []
for i in range(1,max_page+1):
print('正在爬取第%s页'%i)
url = 'http://huodong.ctrip.com/Activity-Booking-OnlineWebSite/Recommend/UserComments?id=%s&productName=%s&pageSize=5&pageIndex=%s'%(comment_id,title,i)
html = gethtml(url)
result = htmlparser(html)
L += parser_xc_comment(result)
return L
if __name__ == '__main__':
conn = pymysql.connect(host='127.0.0.1',port=3306,user='root',passwd='123456',charset='utf8',db='xiecheng',use_unicode=True)
cur = conn.cursor()
sql = 'SELECT comment_url,id,title FROM xc_index'
cur.execute(sql)
t = cur.fetchall()
for i in t:
url = 'http://huodong.ctrip.com' + i[0]
id = i[1]
title = i[2]
try:
L = main(url)
except Exception as e:
pass
for i in L:
user, comment, point, img = i['user'],i['comment'],i['point'],i['img']
sql = 'INSERT INTO xc_comment VALUES (null,"%s","%s","%s","%s","%s")'%(user,comment,point,img,id)
cur.execute(sql)
try:
with open('../result/%s.csv'%title,'a',encoding='utf8') as f:
f.writelines(user+'^'+comment+'^'+point+'^'+img+'\n')
except Exception as e:
pass
conn.commit()
cur.close()
conn.close()
| [
"17600583156@163.com"
] | 17600583156@163.com |
4ea7ad91d54c17b9d66cbf4ff43596777fadc66b | 0c7631ca297c9cf9409fd2066ccd7f387221a33d | /connectfour/agents/base_agent.py | 15edfdacf78758bffbdfe7b39216d4243aa1523a | [
"MIT"
] | permissive | yongjiajun/ConnectFour-AI-Bot | 281ca630d567576110ed40795de887fce9cc9eae | 87953fe752e8360e875558d6f975113640ea3b43 | refs/heads/master | 2021-06-12T13:52:17.224013 | 2021-04-27T16:16:25 | 2021-04-27T16:16:25 | 179,936,071 | 0 | 1 | MIT | 2021-04-27T16:16:26 | 2019-04-07T08:15:10 | Python | UTF-8 | Python | false | false | 1,754 | py | from connectfour.agents.computer_player import RandomAgent
import random
class TestAgent(RandomAgent):
def __init__(self, name):
super().__init__(name)
def get_move(self, board):
"""
Args:
board: An instance of `Board` that is the current state of the board.
Returns:
A tuple of two integers, (row, col)
"""
"""
These are the variables and functions for board objects which may be helpful when creating your Agent.
Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.
Board Variables:
board.width
board.height
board.last_move
board.num_to_connect
board.winning_zones
board.score_array
board.current_player_score
Board Functions:
get_cell_value(row, col)
try_move(col)
valid_move(row, col)
valid_moves()
terminal(self)
legal_moves()
next_state(turn)
winner()
"""
while True:
col = random.randint(0, board.width)
row = board.try_move(col)
if row >= 0:
break
return row, col
def evaluateBoardState(self, board, player):
"""
Your evaluation function should look at the current state and return a score for it.
As an example, the random agent provided works as follows:
If the opponent has won this game, return -1.
If we have won the game, return 1.
If neither of the players has won, return a random number.
"""
return 0
| [
"noreply@github.com"
] | yongjiajun.noreply@github.com |
ce201b168571458d67ab20cafe4ac3fd2e96fb64 | 804587e359503c20bd2711abddaef319143e26f1 | /positiveMom5-dhr.py | 7e0fe510bfe581db0b277b0bbeb221830448e32d | [] | no_license | HaoruiDing/--dhr | cf02b51863a1e93b658a73a5a0439a320b8d2c7b | 4d44078b1f6cf259d767f558275f7d35114ae0bd | refs/heads/master | 2020-07-28T14:11:28.122042 | 2019-10-18T08:42:28 | 2019-10-18T08:42:28 | 209,435,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | __author__ = 'dinghr'
import time
import numpy as np
import pandas as pd
from FactorModule.FactorBase import FactorBase
from DataReaderModule.Constants import ALIAS_FIELDS as t
class Factor(FactorBase):
def __init__(self):
super(Factor,self).__init__()
self.factorName = __name__.split('.')[-1]
self.needFields = [t.CLOSE, t.VWAP]
self.neutral=True
# put remote conn here if need extra data
def factor_definition(self):
s = time.time()
needData = self.needData
close = needData[t.CLOSE]
vwap = needData[t.VWAP]
ret = close/vwap-1
ret[ret<0]=2*ret
factor = -self.calculator.Decaylinear(x=ret,d=5)
print('factor {0} done with {1} seconds'.format(self.factorName, time.time() - s))
return factor
def run_factor(self):
self.run()
fct = Factor()
fct.run_factor() | [
"noreply@github.com"
] | HaoruiDing.noreply@github.com |
46d77a15620d1f1fdf602e2691b76dd11652c190 | c6769a12358e53c52fddfdcacb78344d446b1771 | /CastorTree/python/treemaker_data_Run2010A.py | b7d671f2946ec3bb84e2427df0c821758749b0ad | [] | no_license | xueweiphy/UACastor | d8717970b9843adc79513b51ea4c8294d89e40f3 | 91893bfb195ecd980b2afaf28e3fa045bca35745 | refs/heads/master | 2020-12-25T21:13:41.178545 | 2012-11-06T09:57:22 | 2012-11-06T09:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,983 | py |
import FWCore.ParameterSet.Config as cms
from UACastor.CastorTree.TightPFJetID_Parameters_cfi import TightPFJetID_Parameters as TightPFJetID_Parameters_Ref
from UACastor.CastorTree.LooseCaloJetID_Parameters_cfi import LooseCaloJetID_Parameters as LooseCaloJetID_Parameters_Ref
from UACastor.CastorTree.TightCaloJetID_Parameters_cfi import TightCaloJetID_Parameters as TightCaloJetID_Parameters_Ref
process = cms.Process("Analysis")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(2000))
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'dcap://maite.iihe.ac.be/pnfs/iihe/cms/store/user/hvanhaev/CastorCollisionData/CastorCollisionData_MinimumBias09_RAWRECO_GoodLumiSections_1.root'
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/3E78417C-8222-E011-90E2-00E081791891.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/68B2B494-8822-E011-8038-0025B3E0652A.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/306AE6E7-8822-E011-9707-00E08178C071.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/58CC84DB-8F22-E011-99C6-0015170AE6E4.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/DE94526A-8F22-E011-86BA-00151715C60C.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/928BF8A0-8F22-E011-99F5-003048D45FE6.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/4C4AA090-8E22-E011-A653-003048D460B6.root',
'/store/data/Run2010A/MinimumBias/RAW-RECO/ValSkim-Dec22Skim_v2/0139/94A5E2BD-8F22-E011-A82D-0025B3E05D44.root',
)#,
# lumisToProcess = cms.untracked.VLuminosityBlockRange(
#'124009:1-124009:68'
#'124020:12-124020:94',
#'124022:69-124022:160',
#'124023:41-124023:96',
#'124024:2-124024:83',
#'124027:24-124027:39',
#'124030:1-124030:31'
#)
)
# magnetic field
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
# configure HLT
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND((40 OR 41) AND NOT (36 OR 37 OR 38 OR 39))')
# require physics declared
process.physDecl = cms.EDFilter("PhysDecl",applyfilter = cms.untracked.bool(True),HLTriggerResults = cms.InputTag("TriggerResults"))
# require primary vertex
process.oneGoodVertexFilter = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string("!isFake && ndof >= 5 && abs(z) <= 15 && position.Rho <= 2"), # tracksSize() > 3 for the older cut
filter = cms.bool(True), # otherwise it won't filter the events, just produce an empty vertex collection.
)
# selection on the rate of high purity tracks (scraping events rejection)
process.noscraping = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
# communicate with the DB
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'GR_R_310_V2::All' # to be used for reprocessing of 2009 and 2010 data (update JEC to Spring10 V8)
# import the JEC services
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
# I guess I should not use this old staff
# process.load("JetMETCorrections.Configuration.L2L3Corrections_900GeV_cff")
# data reconstruction starts from raw
process.load("Configuration.StandardSequences.RawToDigi_cff") # redo digi from raw data (digi not kept)
process.load("Configuration.StandardSequences.Reconstruction_cff") # redo rechit
process.castorDigis.InputLabel = 'source'
process.load("RecoLocalCalo.Castor.CastorCellReco_cfi") # redo cell
process.load("RecoLocalCalo.Castor.CastorTowerReco_cfi") # redo tower
process.load("RecoJets.JetProducers.ak7CastorJets_cfi") # redo jet
process.load("RecoJets.JetProducers.ak7CastorJetID_cfi") # redo jetid
# Final Tree
process.TFileService = cms.Service("TFileService",fileName = cms.string("CastorTree_data_Run2010A.root"))
# Event Reconstruction (need to be updated)
process.castortree = cms.EDAnalyzer('CastorTree',
StoreGenKine = cms.bool(False),
StoreGenPart = cms.bool(False),
StoreCastorDigi = cms.bool(True),
StoreCastorJet = cms.bool(True),
# input tag for L1GtTriggerMenuLite retrieved from provenance
L1GT_TrigMenuLite_Prov = cms.bool(True),
# input tag for L1GtTriggerMenuLite explicitly given
L1GT_TrigMenuLite = cms.InputTag('l1GtTriggerMenuLite'),
L1GT_ObjectMap = cms.InputTag('hltL1GtObjectMap','','HLT'),
hepMCColl = cms.InputTag('generator','','HLT'),
genPartColl = cms.InputTag('genParticles','','HLT'),
CastorTowerColl = cms.InputTag('CastorTowerReco', '','Analysis'),
CastorDigiColl = cms.InputTag('castorDigis', '','Analysis'),
CastorRecHitColl = cms.InputTag('castorreco','','Analysis'),
BasicJet = cms.InputTag('ak7BasicJets','','Analysis'),
CastorJetID = cms.InputTag('ak7CastorJetID','','Analysis'),
PFJetColl = cms.InputTag('ak5PFJets', '', 'RECO'),
PFJetJEC = cms.string('ak5PFL2L3Residual'), # L2L3Residual JEC should be applied to data only
PFJetJECunc = cms.string('AK5PF'),
CaloJetColl = cms.InputTag('ak5CaloJets','','RECO'),
CaloJetId = cms.InputTag('ak5JetID','','RECO'),
CaloJetJEC = cms.string('ak5CaloL2L3Residual'), # L2L3Residual JEC should be applied to data only
CaloJetJECunc = cms.string('AK5Calo'),
CaloTowerColl = cms.InputTag('towerMaker','','RECO'),
TightPFJetID_Parameters = TightPFJetID_Parameters_Ref,
LooseCaloJetID_Parameters = LooseCaloJetID_Parameters_Ref,
TightCaloJetID_Parameters = TightCaloJetID_Parameters_Ref,
JetPtCut = cms.double(8.0), # Jet Pt > 8 GeV at 900 GeV and 2.36 TeV
JetEtaCut = cms.double(2.5),
requested_hlt_bits = cms.vstring('')
)
# list of processes
process.p = cms.Path(process.physDecl*process.hltLevel1GTSeed*process.oneGoodVertexFilter*process.noscraping
*process.castorDigis*process.castorreco*process.CastorCellReco*process.CastorTowerReco*process.ak7BasicJets*process.ak7CastorJetID*process.castortree)
| [
""
] | |
29b8f514e60d5fbaeb70814d543984d569fcfb8d | bde2f1c54d0623668bcf4a632c6ec888e5842e0c | /python/emails/dailyBurnDown.py | bfbe4bc6731ae59af3eab06ad2542a006e542c22 | [] | no_license | donofden/vsts-mini | 59f4956db1a6b6dfe03c6a9acfcef3ecbff3c2f4 | 73f3d7d29ce5f6f9f8201f34faa15a07414192cc | refs/heads/master | 2023-02-19T11:39:55.986728 | 2022-11-14T14:54:25 | 2022-11-14T14:54:25 | 146,557,074 | 6 | 1 | null | 2023-02-15T21:42:06 | 2018-08-29T06:44:19 | Python | UTF-8 | Python | false | false | 3,379 | py | import sendEmail
import psycopg2
import logging
import time
import json
from decimal import *
from re import escape
from datetime import datetime
from pathlib import Path
from inspect import getsourcefile
import os.path
import sys
current_path = os.path.abspath(getsourcefile(lambda:0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import databaseOperations as db
# Get all Teams
teams = db.select_all_records('teams', 'project_id,team_id,name,email')
decoded_hand = json.loads(teams)
teamsTuple = tuple(decoded_hand)
for team in teamsTuple:
# Create the plain-text and HTML version of your message
contents = Path("dailyBurnDownHTML.txt").read_text()
contents = contents.replace("VSTS_TEAM_NAME", team['name'])
# Get iterations of the Teams
today = datetime.date(datetime.now())
# Fetch the current iteration of the team
condition = "team_id='"+ team['team_id'] +"' AND '"+ str(today) +"' between start_date and finish_date"
iterations = db.select_records('iterations', 'iteration_id,name,start_date,finish_date', condition)
decoded_hand = json.loads(iterations)
iterationsTuple = tuple(decoded_hand)
for iteration in iterationsTuple:
board_dict = {}
contents = contents.replace("VSTS_SPRINT_NO", iteration['name'])
contents = contents.replace("VSTS_SPRINT_START_DATE", iteration['start_date'][0:10])
contents = contents.replace("VSTS_SPRINT_END_DATE", iteration['finish_date'][0:10])
# Get all Teams
# workitems = db.select_all_records('workitems', 'workitem_no,column_name,story_point')
condition_one = " work_item_type != 'Task' AND iteration_id = '"+ iteration['iteration_id'] +"' AND DATE(created_date) = '"+ str(today) +"' "
workitems = db.select_records('workitems', 'workitem_no,column_name,story_point', condition_one)
workitem_decoded_hand = json.loads(workitems)
workitemsTuple = tuple(workitem_decoded_hand)
for workitem in workitemsTuple:
if workitem['column_name'] in board_dict:
new_Value = float(board_dict[workitem['column_name']].get('point')) + float(workitem['story_point'])
no_of_card = board_dict[workitem['column_name']].get('items') + 1
board_dict[workitem['column_name']] = {'point': new_Value, 'items': no_of_card}
else:
board_dict[workitem['column_name']] = {'point': workitem['story_point'], 'items': 1}
htmlTabels = ""
number = 1
for key, value in board_dict.items():
newValue = str(value['point'])
htmlTabels = htmlTabels + """\
<tr>
<th class="smarttable-header">"""+ str(number) +"""</th>
<td class="smarttable-header">"""+ key +"""</td>
<td class="smarttable-header">"""+ str(value['items']) +"""</td>
<td class="smarttable-header">"""+ str(value['point']) +"""</td>
</tr>
"""
number += 1
contents = contents.replace("BURNDOWN_COLUMN_SPLITUP", htmlTabels)
html = """\
""" + contents + """
"""
subject = "VSTS-MINI: Sprint BurnDown " + team['name']
sendEmail.send(team['email'], subject, html)
#sys.exit(0)
| [
"aravindkumar.ganesan@gmail.com"
] | aravindkumar.ganesan@gmail.com |
c7b8c670e66aeb00f430cf48793ce0ab5726fc35 | 4b5065a3e43fc14f61af27e07b8ef06f92ddb5c4 | /6kyu_Are_they_the_same.py | 0841c2936d6355c64cc7e4cf25ba3cff3f04cf6b | [] | no_license | WPrintz/CodeWars | bc6aa23530bb750d34a360ba593d26edb197c13c | 94b9f8b08a8fd6e1b5ec8b4a6a21232e99042d4c | refs/heads/master | 2020-05-23T09:08:50.446838 | 2017-08-04T19:53:43 | 2017-08-04T19:53:43 | 80,437,464 | 1 | 2 | null | 2017-02-16T21:37:48 | 2017-01-30T16:07:26 | Python | UTF-8 | Python | false | false | 2,111 | py | '''
Are they the "same"?
https://www.codewars.com/kata/550498447451fbbd7600041c
Solved 01-24-2017
Description: 6 kyu
Given two arrays a and b write a function comp(a, b) (compSame(a, b) in Clojure) that checks whether the two arrays have the "same" elements, with the same multiplicities. "Same" means, here, that the elements in b are the elements in a squared, regardless of the order.
Examples
Valid arrays
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a, b) returns true because in b 121 is the square of 11, 14641 is the square of 121, 20736 the square of 144, 361 the square of 19, 25921 the square of 161, and so on. It gets obvious if we write b's elements in terms of squares:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
Invalid arrays
If we change the first number to something else, comp may not return true anymore:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [132, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 132 is not the square of any number of a.
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 36100, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 36100 is not the square of any number of a.
Remarks
a or b might be [] (all languages). a or b might be nil or null or None (except in Haskell, Elixir, C++).
If a or b are nil (or null or None), the problem doesn't make sense so return false.
If a or b are empty the result is evident by itself.
Note for C
The two arrays have the same size (> 0) given as parameter in function comp.
'''
def comp(array1, array2):
out = True
if type(array1) == list and type(array2) == list:
if len(array1) != len(array2) == 0 or (None in array1 or None in array2) : out = False
else:
for i in range(0, len(array1)):
if array1[i] ** 2 not in array2 : out = False
for i in range(0, len(array2)):
if array2[i] ** 0.5 not in array1 : out = False
else: out = False
return out | [
"jnoholds@gmail.com"
] | jnoholds@gmail.com |
2eac43f1af1e4da0fc06f5e28599b431529b8adc | b326dc4b12c138e0b55cb336f1d4f70959983a4a | /articles/views.py | 9dfd7fb7546069b08127da715c7563153f1dc439 | [] | no_license | dlasyd/b101 | e59457623f8cfeb49d048914f79bb106df3670f0 | fde819ce3b8d8bec76ec7d51396a7af0f9078923 | refs/heads/master | 2020-06-23T13:40:58.006361 | 2016-09-05T21:28:06 | 2016-09-05T21:28:06 | 66,212,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,809 | py | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import get_list_or_404, get_object_or_404, redirect
from django.http import Http404
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
import json
from .models import Article, Category
class PaginatorMixin:
def select_paginated(self, articles, **kwargs):
paginator = Paginator(articles, self.paginate_by)
try:
articles = paginator.page(self.kwargs['page'])
except KeyError:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
return articles
class ArticleDetailed(DetailView):
model = Article
template_name = 'articles/article-detailed.html'
def get_queryset(self):
return Article.objects.published()
class CategoryList(ListView):
template_name = 'articles/articles-by-category-list.html'
context_object_name = 'articles'
paginate_by = 10
def get_queryset(self):
self.cat = get_object_or_404(Category, slug=self.kwargs['slug'])
return get_list_or_404(Article.objects.filter(category=self.cat, state='3'))
def get_context_data(self, **kwargs):
context = super(CategoryList, self).get_context_data(**kwargs)
context['category'] = self.cat
articles = Article.objects.published_in_category(category=self.cat)
paginator = Paginator(articles, self.paginate_by)
try:
articles = paginator.page(self.kwargs['page'])
except KeyError:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
context['articles'] = articles
return context
class AllArticlesList(ListView, PaginatorMixin):
model = Article
template_name = 'articles/all-articles-list.html'
context_object_name = 'articles'
paginate_by = 10
def get_queryset(self):
return Article.objects.published()
def get_context_data(self, **kwargs):
context = super(AllArticlesList, self).get_context_data(**kwargs)
articles = Article.objects.published()
paginator = Paginator(articles, self.paginate_by)
try:
articles = paginator.page(self.kwargs['page'])
except KeyError:
articles = paginator.page(1)
except EmptyPage:
articles = paginator.page(paginator.num_pages)
context['articles'] = articles
return context
def legacy_redirect(request, slug):
article = get_object_or_404(Article, slug=slug)
if article.legacy:
return redirect('article-view', slug=slug, permanent=True)
else:
raise Http404("Article does not exist")
| [
"malevanyy@gmail.com"
] | malevanyy@gmail.com |
a731a45a320bf8f9d842d1777a034237eb7216ca | 226af2dc7e58189104337ede8a98926ba58d61fb | /Web_Python/week_4/routing/urls.py | 78646e6362cd2d631205876f2fe1192abd93925b | [] | no_license | Termoplane/Coursera | 4378c65b97a4b8ff7ae747adeb7dfb2b66924fe9 | aad9d66549899d8f1b673ede2b7184fbde722c51 | refs/heads/master | 2021-07-01T17:24:41.500645 | 2020-05-03T13:25:20 | 2020-05-03T13:25:20 | 237,803,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^simple_route/$', views.simple_route),
url(r'^slug_route/([a-z0-9-_]{1,16})/$', views.slug_route),
url(r'^sum_route/(-?\d+)/(-?\d+)/$', views.sum_route),
url(r'^sum_get_method/$', views.sum_get_method),
url(r'^sum_post_method/$', views.sum_post_method),
] | [
"alex223666@gmail.com"
] | alex223666@gmail.com |
6f9e09398ee6dce3c121ff01239fdf659c43cd2f | 99601eae750c9d2b7a5552f9e584e7cba6f306b6 | /Classifier/preprocess.py | 79276e6de323278c7017dd01c406e83be50b6874 | [
"MIT"
] | permissive | aitikgupta/EmotionRecog | 0d799f3973bf22cd7b4a74cd048d57aa75258df7 | 2e440e46ed0b67bf7432f49135901e59ba9203d8 | refs/heads/master | 2023-05-02T17:21:15.763197 | 2020-08-20T16:52:45 | 2020-08-20T16:52:45 | 284,590,389 | 1 | 0 | MIT | 2021-05-25T04:31:56 | 2020-08-03T03:04:39 | Python | UTF-8 | Python | false | false | 408 | py | from dataset import getData
from utils import balance_class, give_train_test_splits
def preprocess_data(filename='/content/drive/My Drive/fer2013.csv', image_size=(48, 48)):
    """Load a fer2013-style csv and return train/test splits plus class count.

    :param filename: path to the csv consumed by ``getData()``.
    :param image_size: (height, width) each flat pixel row is reshaped to.
    :return: tuple of (result of ``give_train_test_splits(...)``, num_class)
    """
    X, Y = getData(filename)
    num_class = len(set(Y))
    # balance = balance_class(Y)
    N, D = X.shape
    # Fix: the original called X.reshape(N, image_size, 1), passing the
    # (h, w) tuple as a single dimension, which raises TypeError in numpy.
    # Unpack it so each row becomes a single-channel (h, w, 1) image.
    X = X.reshape(N, image_size[0], image_size[1], 1)
    return give_train_test_splits(X, Y, test_size=0.1, random_state=0), num_class
"aitikgupta@gmail.com"
] | aitikgupta@gmail.com |
bd871487d9ff885a9d7179d519d66adfe7a8c26d | 286e304aa95c0bfb94540cdc41c9d0a2eeeef3f7 | /graph.py | 6fd2cee01e77859e74d183b51b6737381d176a5c | [] | no_license | czhbruce/Miniproject-Two | 80ff3b11aa14b2ff6a340609475559f2dc49c03d | 123d9ba92e77c075d0e684892fe56006a26bb431 | refs/heads/master | 2020-04-11T00:43:02.000969 | 2016-09-12T13:22:51 | 2016-09-12T13:22:51 | 68,010,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,637 | py | import os
import re
import sys
import urllib
import requests
import networkx as nx
import matplotlib as plt
import itertools
def Download_page():
    """Save New York Social Diary party-pictures index pages 2-26 into ./Pagelist."""
    out_dir = './Pagelist'
    for page_no in range(2, 27):
        url = 'http://www.newyorksocialdiary.com/party-pictures?page=%d' % page_no
        urllib.urlretrieve(url, os.path.join(out_dir, 'Page%d' % page_no))
    return
def Urllist():
    # Stub: never implemented; kept so any existing call sites do not break.
    return
def Output(list, filename):
    """Write each element of `list` to `filename`, one per line (no trailing newline).

    The parameter is still named `list` (shadowing the builtin) to keep the
    original call signature intact for existing callers.
    """
    # `with` closes the handle even if write() raises; the original only
    # closed on the success path.
    with open(filename, 'w') as f:
        f.write("\n".join(list))
    return
def Out(text, filename):
    """Write the string `text` to `filename`, replacing any existing content."""
    # `with` guarantees the file is closed even when write() raises
    # (the original leaked the handle on error).
    with open(filename, 'w') as f:
        f.write(text)
    return
def Url_final():
    """Collect pre-2014 party-picture URLs from the locally saved index pages.

    Reads each saved index page, pulls every linked party page out of the
    'field-content' span, prefixes the site domain, skips 2015 entries,
    and writes the result (one URL per line) to 'Pre2014URLs' via Output().
    """
    domain = 'http://www.newyorksocialdiary.com/'
    url_final = []
    for page_no in range(2, 27):
        pagename = '/Users/HumbleBoy/Documents/Data_Incubator/miniprojects/graph/Pagelist/Page%d' % page_no
        text = urllib.urlopen(pagename).read()
        for url in re.findall(r'<span class="field-content"><a href="(\S*)"', text):
            if '2015' not in url:
                url_final.append(domain + url)
    Output(url_final, 'Pre2014URLs')
    return
def Download_photopages():
    """Download every pre-2014 photo page listed in 'Pre2014URLs' into ./Webpage.

    Entries 2..1201 of the URL file are fetched and saved as photoN
    (matching the numbering used elsewhere in this script).
    """
    url_final = urllib.urlopen('Pre2014URLs').read().split('\n')
    out_dir = './Webpage'
    for idx in range(2, 1202):
        urllib.urlretrieve(url_final[idx], os.path.join(out_dir, 'photo%d' % idx))
    return
def Alltext():
    """Concatenate the raw HTML of photo pages 2-1201 into the file 'AllText'.

    Fix: both the output file and each per-page handle are now closed
    deterministically -- the original leaked every urlopen() handle and
    only closed the output file on the success path.
    """
    filename = 'AllText'
    with open(filename, 'w') as f:
        for i in range(2, 1202):
            pagename = '/Users/HumbleBoy/Documents/Data_Incubator/miniprojects/graph/Webpage/photo%d' % i
            urlfile = urllib.urlopen(pagename)
            try:
                f.write(urlfile.read())
            finally:
                urlfile.close()
    return
def Get_names(lines):
    """Split a caption string into two-word names.

    Consecutive words are paired left to right ("A B C D" -> ["A B", "C D"]);
    a trailing odd word is dropped, matching the original behaviour.

    :param lines: whitespace-separated caption text.
    :return: list of "First Last" strings.
    """
    words = lines.split()
    # Floor division (//) keeps the original Python 2 semantics of len/2
    # while remaining valid under Python 3 (where / yields a float and
    # range() would raise TypeError).
    return [' '.join(words[i:i + 2]) for i in range(0, 2 * (len(words) // 2), 2)]
def Add_edges(graph, crowd_names):
    """Add one edge per unordered pair of people appearing in the same photo.

    Fix: the original loop bounds (``j`` always starting at 1, independent
    of ``i``) produced self-loops and duplicate edges, which inflated
    MultiGraph edge counts used for the degree/best-friend statistics.
    ``j`` now starts at ``i + 1`` so each pair is added exactly once.

    :param graph: any object with add_edges_from() (e.g. networkx graph).
    :param crowd_names: list of name strings from one caption.
    :return: None; captions with fewer than two people add nothing.
    """
    n = len(crowd_names)
    if n <= 1:
        return
    pairs = [(crowd_names[i], crowd_names[j])
             for i in range(n - 1)
             for j in range(i + 1, n)]
    graph.add_edges_from(pairs)
def Last(tuple):
    """Return the final element of *tuple* (used as a sort key for (k, v) pairs)."""
    return tuple[len(tuple) - 1]
def main():
    """Drive the NYSD co-appearance analysis (Python 2 script).

    Pipeline: read the concatenated HTML in 'AllText', extract photo
    captions, strip title/venue words, pair words into names, build a
    name co-appearance graph and print degree / PageRank statistics.
    Finished or alternative stages are toggled off either with '#'
    comments or by wrapping whole sections in bare triple-quoted
    strings -- those strings are kept verbatim below.
    """
    #Download_page()
    #Url_final()
    #Download_photopages()
    #Alltext()
    #extract alltext under each photo
    pagename = 'AllText'
    urlfile = urllib.urlopen(pagename)
    text = urlfile.read()
    # Captions live inside a photocaption-classed table cell.
    nametext = re.findall(r'<td valign="top"><div align="center" class="photocaption"> (.*?)</', text)
    Output(nametext, 'NameText')
    #Delete fwords from NameText
    #for line in nametext[0:2]:
    #nameparts = re.findall(r'([A-Z][\S]*)\W', line)
    # filter1 = re.sub(r'([A-Z].*)\s', line)
    #Get individual names,filter out non-name words
    # Lower-cased words that look like name parts but are titles/venues.
    fwords = ['event', 'co-chairs','co-chair','dr', 'mayor','new','york', 'board', 'member',
              'executive', 'director', 'trustees', 'steering', 'committee', 'city', 'mr', 'mrs', 'miss',
              'ms','vice', 'president','medical', 'center','benefit','fund','special','surgery',
              'big', 'c"', 'trustee', 'girl', 'scout', 'gala', 'chair', 'cooper', 'union', 'honoree',
              'historic', 'valley','editor', 'author','ph.d','museum', 'vice-chair', 'fall', 'house'
              , 'guest'] #need lower-cased
    #fwords = []
    #method from slack
    #reg = '|'.join(fwords)
    #nametext_filtered = []
    #for line in nametext:
    #    nametext_filtered.append(re.sub(reg, '', line))
    name_elements2 = []
    name_elements = []
    #check=0
    for line in nametext:
        # Capitalised word pairs, e.g. "John Smith" / "Anne O'Neil".
        names_line = re.findall(r'([A-Z][a-zA-Z]+\s[A-Z][-\'a-zA-Z]*)', line)
        #check += len(names_line)
        name_elements2.extend([(' '.join(names_line))])
    #print 'found %d names' %(check)
    #print (','.join(names_line
    check=0
    #removecount = 0
    # Remove filtered words from each caption's word list.
    for line in name_elements2:
        nameparts = line.split()
        nameparts_copy = nameparts[:]
        for word in nameparts:
            if word.lower() in fwords:
                nameparts_copy.remove(word)
                #removecount += 1
        name_elements.extend([(' '.join(nameparts_copy))])
    #print 'remove %d times' %removecount
    """#my method
    name_elements = []
    for line in nametext:
        nameparts = re.findall(r'([A-Z]\S*[a-z])\W', line)
        #nameparts = re.findall(r'([A-Z][\S]*)\W', line)
        nameparts_copy = nameparts[:]
        for word in nameparts:
            if word.lower() in fwords:
                nameparts_copy.remove(word)
        name_elements.append(' '.join(nameparts_copy))
    #Output(name_elements,'Name Elements')
    """
    """
    #make namelist
    namelist = []
    for crowd in name_elements:
        crowd_names = Get_names(crowd)
        for name in crowd_names:
            if name not in namelist:
                namelist.append(name)
    Output(namelist, 'Namelist')
    print len(namelist)
    """
    #Add edges to people in the same photo
    graph = nx.MultiGraph()
    for crowd in name_elements:
        crowd_names = Get_names(crowd)
        Add_edges(graph, crowd_names)
    """
    """
    #Q1-degree
    graph = nx.MultiGraph()
    for crowd in name_elements:
        crowd_names = Get_names(crowd)
        Add_edges(graph, crowd_names)
    d = {}
    urlfile = urllib.urlopen('Namelist')
    namelist = urlfile.read().split('\n')
    for name in namelist:
        d[name] = len(graph.edges(name))
    deg100 = sorted(d.items(), key = Last, reverse = True)[:100]
    print 'Degree: \n', deg100
    """
    #Best Friends
    print 'start best friends'
    b = {}
    count = 0
    for pair in itertools.combinations(namelist,2):
        count += 1.0
        temp = graph.number_of_edges(pair[0], pair[1])
        if temp > 8:
            b[pair] = temp
        print 'In progress:', '{:1.2f}'.format(count/(18000000.0)), '% finished'
    bf100 = sorted(b.items(), key = Last, reverse = True)[:100]
    print 'BestFriends:\n', bf100
    """
    #Pagerank
    graph = nx.Graph()
    for crowd in name_elements:
        crowd_names = Get_names(crowd)
        Add_edges(graph, crowd_names)
    pr = nx.pagerank(graph)
    pagerank100 = sorted(pr.items(), key = Last, reverse = True)[:100]
    print 'Pagerank:\n', pagerank100
    """
    #networkx package example
    G=nx.Graph()
    G.add_node("a")
    G.add_nodes_from(["b","c"])
    G.add_edge(1,2)
    G.add_edge(1,2)
    edge = ("d", "e")
    G.add_edge(*edge)
    edge = ("a", "b")
    G.add_edge(*edge)
    G.add_edges_from([("a","c"),("c","d"), ("a",1), (1,"d"), ("a",2)])
    print G.number_of_edges('a','a')
    #G.edges('d') gives all edges connected to 'd'
    #G.number_of_edges('a','b') #gives #of edges between two nodes
    for i in range(2,5):
        url = 'http://www.newyorksocialdiary.com/party-pictures?page=%d' %i
        r = requests.get(url)
        #output to a file
        filename = 'Pages%d.txt' %i
        f = open(filename, 'w')
        f.write(r.text)
        f.close()
    """
# Script entry point.
if __name__ == "__main__":
    main()
#request futures package
#futures
"""
This is my regex for capturing first names only `(^[A-Z][a-zA-Z]+$)`
[11:21]
And this is for first name + last name `([A-Z][a-zA-Z]+\s[A-Z][-\'a-zA-Z]*)`
[11:23]
Both of those are mutually exclusive, i.e. they both return non-overlapping strings
""" | [
"noreply@github.com"
] | czhbruce.noreply@github.com |
512207d5f2dc40ee2795795e89637c8c548255c1 | e159bfaa2eb5aff21f5d44d8f07a5e435937a551 | /data/utils/remove_users.py | f984880bad75b19c11dfb913806393e04051b312 | [
"BSD-2-Clause"
] | permissive | dssaenzml/federated_learning_nlp | fe4dbaa60a4caacd9b231d0859d2385465656707 | b48fbeb3e78af5971885337203504c017ef1553b | refs/heads/main | 2023-04-11T00:05:19.025000 | 2021-04-14T10:07:52 | 2021-04-14T10:07:52 | 347,849,479 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py |
'''
removes users with less than the given number of samples
'''
import argparse
import json
import os
from constants import DATASETS
parser = argparse.ArgumentParser()

parser.add_argument('--name',
                    help='name of dataset to parse; default: sent140;',
                    type=str,
                    choices=DATASETS,
                    default='sent140')
parser.add_argument('--min_samples',
                    help='users with less than x samples are discarded; default: 10;',
                    type=int,
                    default=10)

args = parser.parse_args()

print('------------------------------')
print('removing users with less than %d samples' % args.min_samples)

# Locate the dataset's data directory relative to this utility script.
parent_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dir = os.path.join(parent_path, args.name, 'data')

# Prefer sampled data; fall back to the full dump when none is present.
subdir = os.path.join(dir, 'sampled_data')
files = []
if os.path.exists(subdir):
    files = os.listdir(subdir)
if not files:
    subdir = os.path.join(dir, 'all_data')
    files = os.listdir(subdir)
files = [f for f in files if f.endswith('.json')]

# Fix: create the output directory up front -- open() used to fail when
# 'rem_user_data' did not already exist.
out_dir = os.path.join(dir, 'rem_user_data')
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

for f in files:
    with open(os.path.join(subdir, f), 'r') as inf:
        data = json.load(inf)

    # Keep only users that meet the sample-count threshold.
    users = []
    hierarchies = []
    num_samples = []
    user_data = {}
    for i, curr_user in enumerate(data['users']):
        curr_num_samples = data['num_samples'][i]
        if curr_num_samples >= args.min_samples:
            user_data[curr_user] = data['user_data'][curr_user]
            users.append(curr_user)
            num_samples.append(curr_num_samples)
            if 'hierarchies' in data:
                hierarchies.append(data['hierarchies'][i])

    all_data = {}
    all_data['users'] = users
    # Only emit hierarchies when every kept user has one.
    if len(hierarchies) == len(users):
        all_data['hierarchies'] = hierarchies
    all_data['num_samples'] = num_samples
    all_data['user_data'] = user_data

    file_name = '%s_keep_%d.json' % ((f[:-5]), args.min_samples)
    print('writing %s' % file_name)
    with open(os.path.join(out_dir, file_name), 'w') as outfile:
        json.dump(all_data, outfile)
"noreply@github.com"
] | dssaenzml.noreply@github.com |
5c08b90e8a537566f3d0f68ef3b7cfc3bd16c5a2 | 6909efd8a5fdcf5df6e2d1757372b788cbcfa41b | /db/Proxy.py | 4c660ca6a2d3f65c6d70868d15049e7d62d19dc6 | [
"MIT"
] | permissive | dangod/ProxyPoolWithUI | 80899f98130dfb0b02e8ad1b7be5a298d543c44c | f04e95fd32f877c086e2b4787dd98c8c1a3aa856 | refs/heads/main | 2023-03-23T21:37:12.823905 | 2021-03-17T05:39:33 | 2021-03-17T05:39:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | # encoding: utf-8
import datetime
class Proxy(object):
    """
    A proxy record, representing one row in the database.
    """

    # DDL run at startup: the proxies table plus indexes covering the two
    # common query patterns (lookup by fetcher; ordering by next validation).
    ddls = ["""
        CREATE TABLE IF NOT EXISTS proxies
        (
            fetcher_name VARCHAR(255) NOT NULL,
            protocal VARCHAR(32) NOT NULL,
            ip VARCHAR(255) NOT NULL,
            port INTEGER NOT NULL,
            validated BOOLEAN NOT NULL,
            validate_date TIMESTAMP,
            to_validate_date TIMESTAMP NOT NULL,
            validate_failed_cnt INTEGER NOT NULL,
            PRIMARY KEY (protocal, ip, port)
        )
    """,
    """
        CREATE INDEX IF NOT EXISTS proxies_fetcher_name_index
        ON proxies(fetcher_name)
    """,
    """
        CREATE INDEX IF NOT EXISTS proxies_to_validate_date_index
        ON proxies(to_validate_date ASC)
    """]

    def __init__(self):
        # Name of the fetcher that discovered this proxy.
        self.fetcher_name = None
        # Protocol (kept spelled "protocal" to match the column name).
        self.protocal = None
        self.ip = None
        self.port = None
        # Whether the latest validation attempt succeeded.
        self.validated = False
        # Time of the latest validation attempt; None if never validated.
        self.validate_date = None
        # Earliest time the next validation should run.
        self.to_validate_date = datetime.datetime.now()
        # Number of consecutive failed validations.
        self.validate_failed_cnt = 0

    def params(self):
        """
        Return a tuple containing all of this record's attributes.
        """
        return (
            self.fetcher_name,
            self.protocal, self.ip, self.port,
            self.validated, self.validate_date, self.to_validate_date, self.validate_failed_cnt
        )

    def to_dict(self):
        """
        Return a dict containing all of this record's attributes.
        """
        return {
            'fetcher_name': self.fetcher_name,
            'protocal': self.protocal,
            'ip': self.ip,
            'port': self.port,
            'validated': self.validated,
            'validate_date': str(self.validate_date) if self.validate_date is not None else None,
            'to_validate_date': str(self.to_validate_date) if self.to_validate_date is not None else None,
            'validate_failed_cnt': self.validate_failed_cnt
        }

    @staticmethod
    def decode(row):
        """
        Parse one row returned by sqlite into a Proxy.
        row : the row returned by sqlite
        """
        assert len(row) == 8
        p = Proxy()
        p.fetcher_name = row[0]
        p.protocal = row[1]
        p.ip = row[2]
        p.port = row[3]
        p.validated = bool(row[4])
        p.validate_date = row[5]
        p.to_validate_date = row[6]
        p.validate_failed_cnt = row[7]
        return p

    def validate(self, success):
        """
        Record one validation result, adjust this proxy's own state
        accordingly, and report whether the proxy should be deleted.
        success : True/False, whether this validation attempt succeeded
        returns : True/False; True means the proxy performs so badly it
                  should be removed from the database
        """
        if success:  # validation succeeded
            self.validated = True
            self.validate_date = datetime.datetime.now()
            self.validate_failed_cnt = 0
            self.to_validate_date = datetime.datetime.now() + datetime.timedelta(minutes=5)  # validate again in 5 minutes
            return False
        else:
            self.validated = False
            self.validate_date = datetime.datetime.now()
            self.validate_failed_cnt = self.validate_failed_cnt + 1
            # The more consecutive failures, the longer until the next validation.
            delay_minutes = self.validate_failed_cnt * 5
            self.to_validate_date = datetime.datetime.now() + datetime.timedelta(minutes=delay_minutes)
            if self.validate_failed_cnt >= 3:
                return True
            else:
                return False
| [
"believe.chenyu@gmail.com"
] | believe.chenyu@gmail.com |
5f57f6473fb8cdbca4cc4ee0af9068cf6a8b3399 | 9dee425a7c020972e7e7ee7416428bb9c38065b4 | /PanoSeg/RF-mmdetection/configs/rfnext/rfnext_fixed_single_branch_cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py | 4313607089129709acf54dce7dcb5d53cd4cd88f | [
"Apache-2.0"
] | permissive | MCG-NKU/ExperiCV | 83a2f5430716b08ad89820c4728350b151ad0e93 | a572952b33fb7a1da1bef5fbb9b357f1cc1274c9 | refs/heads/main | 2023-05-23T14:13:49.152595 | 2023-01-01T09:42:14 | 2023-01-01T09:42:14 | 571,400,533 | 12 | 2 | null | 2022-12-25T11:30:47 | 2022-11-28T03:21:44 | Jupyter Notebook | UTF-8 | Python | false | false | 1,174 | py | _base_ = '../hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256),
rfsearch_cfg=dict(
mode='fixed_single_branch',
rfstructure_file= # noqa
'./configs/rfnext/search_log/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/local_search_config_step11.json', # noqa
config=dict(
search=dict(
step=0,
max_step=12,
search_interval=1,
exp_rate=0.5,
init_alphas=0.01,
mmin=1,
mmax=24,
num_branches=2,
skip_layer=['stem', 'layer1'])),
))
custom_hooks = [
dict(
type='RFSearchHook',
config=model['rfsearch_cfg']['config'],
mode=model['rfsearch_cfg']['mode'],
),
]
| [
"zhengyuanxie2000@gmail.com"
] | zhengyuanxie2000@gmail.com |
89100f2f7b48de62a884d9b36f7c51e2e31a7150 | adbb149cba420b9dd11825c2d4af6998b00fc1c7 | /server/data/events_data.py | 243dc1966f786b61ea0353a97f1a1142c22af504 | [] | no_license | JessKaria/project-4 | 36099d85479371f3d12aaf84e1f82fc261f51573 | 7a6cdcfe76df0837d24b74bc675d4c6926ba0692 | refs/heads/main | 2023-04-05T10:06:27.308408 | 2021-04-09T10:39:51 | 2021-04-09T10:39:51 | 346,420,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,623 | py | from models.events import Event
from models.category import Category
# Development seed data: categories first, then the events that reference them.
list_categories = [
    Category(category="business"),
    Category(category="food"),
    Category(category="health"),
    Category(category="music"),
    Category(category="charity"),
    Category(category="community"),
    Category(category="fashion"),
    Category(category="film"),
    Category(category="hobbies"),
    Category(category="government"),
    Category(category="science"),
]

# Every seed event used the same placeholder copy; hoisting it into one
# module-private constant removes seven byte-identical duplicates.
_LOREM_IPSUM = "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum."

list_events = [
    # The first seed event carries every category.
    Event(
        name="Tupac Live at the Apollo!!",
        date="21st June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        expected_attendees=20000,
        location="Dubai",
        image="https://images.unsplash.com/photo-1492684223066-81342ee5ff30?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1350&q=80",
        user_id=1,
        category=list_categories),
    Event(
        name="Beyonce Live in Concert",
        date="21st June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=50,
        expected_attendees=40000,
        location="London",
        image="https://images.unsplash.com/photo-1478147427282-58a87a120781?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1350&q=80",
        user_id=2),
    Event(
        name="Shaggy Live in Concert",
        date="20th June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        location="Germany",
        expected_attendees=40000,
        image="https://images.unsplash.com/photo-1541683142766-bd6163178577?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1350&q=80",
        user_id=3),
    Event(
        name="Glastonbury",
        date="20th June",
        start_time="3pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        location="Sydney",
        expected_attendees=40000,
        image="https://images.unsplash.com/photo-1598387992619-f86d5293bace?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1055&q=80",
        user_id=4),
    Event(
        name="Secret Garden Party",
        date="20th June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        location="Birmingham",
        expected_attendees=40000,
        image="https://images.unsplash.com/photo-1533174072545-7a4b6ad7a6c3?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80",
        user_id=5),
    Event(
        name="Beer festival",
        date="20th June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        location="Berlin",
        expected_attendees=40000,
        image="https://images.unsplash.com/photo-1506157786151-b8491531f063?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80",
        user_id=6),
    Event(
        name="Usher live in Concert",
        date="20th June",
        start_time="2pm",
        duration="1 day",
        description=_LOREM_IPSUM,
        target_age=30,
        location="Tokyo",
        expected_attendees=40000,
        image="https://images.unsplash.com/photo-1514605411468-81f98844a475?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80",
        user_id=1),
]
| [
"jesskaria1@gmail.com"
] | jesskaria1@gmail.com |
30563f1f0d1d655fea8cc0dad2b55e5530bab2b8 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-cph/huaweicloudsdkcph/v1/model/list_resource_instances_request.py | 0823fdc434041d9670e7c3631928d7a2eaaf42b5 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,217 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListResourceInstancesRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # NOTE: auto-generated huaweicloud SDK model; structure follows the
    # OpenAPI code generator's conventions.

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'resource_type': 'str',
        'body': 'ListResourceInstancesRequestBody'
    }

    attribute_map = {
        'resource_type': 'resource_type',
        'body': 'body'
    }

    def __init__(self, resource_type=None, body=None):
        """ListResourceInstancesRequest

        The model defined in huaweicloud sdk

        :param resource_type: Resource type. - cph-server (Cloud Phone server)
        :type resource_type: str
        :param body: Body of the ListResourceInstancesRequest
        :type body: :class:`huaweicloudsdkcph.v1.ListResourceInstancesRequestBody`
        """

        self._resource_type = None
        self._body = None
        self.discriminator = None

        self.resource_type = resource_type
        if body is not None:
            self.body = body

    @property
    def resource_type(self):
        """Gets the resource_type of this ListResourceInstancesRequest.

        Resource type. - cph-server (Cloud Phone server)

        :return: The resource_type of this ListResourceInstancesRequest.
        :rtype: str
        """
        return self._resource_type

    @resource_type.setter
    def resource_type(self, resource_type):
        """Sets the resource_type of this ListResourceInstancesRequest.

        Resource type. - cph-server (Cloud Phone server)

        :param resource_type: The resource_type of this ListResourceInstancesRequest.
        :type resource_type: str
        """
        self._resource_type = resource_type

    @property
    def body(self):
        """Gets the body of this ListResourceInstancesRequest.

        :return: The body of this ListResourceInstancesRequest.
        :rtype: :class:`huaweicloudsdkcph.v1.ListResourceInstancesRequestBody`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this ListResourceInstancesRequest.

        :param body: The body of this ListResourceInstancesRequest.
        :type body: :class:`huaweicloudsdkcph.v1.ListResourceInstancesRequestBody`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListResourceInstancesRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
056124ade6036e7d9c1b4817404a25f132abcf7f | ecba842cc189499da2c98248e92a458dbcc0dc67 | /apps/website/privacy/urls.py | 59aa42a56262eec8c222c517c823f3eb3f7c6516 | [] | no_license | aquaristar/hhlearn | c23e94ab93221419db74409f44d8310244212190 | ec409b7886bacb33cd3f5c3a724243a30158cd54 | refs/heads/master | 2023-03-10T15:46:39.740438 | 2019-11-16T19:19:02 | 2019-11-16T19:19:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django.conf.urls import patterns, include, url
# Route /privacy/ to the privacy view (old-style patterns() with a
# string view prefix, resolved against apps.website.privacy.views).
urlpatterns = patterns('apps.website.privacy.views',
    url(r'^privacy/$', 'privacy', name='privacy'),
)
"aquaristar@gmail.com"
] | aquaristar@gmail.com |
a34435840db9da53d3018c4c656ccd70c7b8a749 | 7c28fc07bd78f5e0387b0cb573736d8c5affb1c9 | /Grad_Cam/grad_cam.py | b653f3813bbd40fbd187fa59ad9b7babbb11d537 | [] | no_license | Tobias-Frenger/ai_thesis | a84d0b891e3eda240cd2415e7ae84825e45a9b9d | 6f81c6bb0d6386b6dacc7e9a4d557760c77e891e | refs/heads/master | 2022-04-12T00:35:01.474646 | 2020-03-26T09:09:36 | 2020-03-26T09:09:36 | 250,200,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 23:22:43 2020
@author: Tobias
"""
import tensorflow as tf
from tensorflow import reshape
from tensorflow.keras import models
from tensorflow.keras import backend as K
import numpy as np
import cv2
import matplotlib.pyplot as plt
def grad_cam(model, layer_name, img_location):
    """Display a Grad-CAM overlay for `model`'s top prediction on one image.

    :param model: Keras model; assumed to take 128x128 inputs scaled to
        [0, 1] -- TODO confirm against the training pipeline.
    :param layer_name: name of the conv layer whose activations are used;
        assumed to yield 8x8 feature maps (see the hard-coded reshape below).
    :param img_location: path of the image file to explain.

    NOTE(review): the module imports `matplotlib as plt` (not
    matplotlib.pyplot), so the plt.imshow/plt.show calls below will fail
    at runtime -- fix the module import.
    """
    im = img_location
    image = cv2.imread(im, cv2.IMREAD_UNCHANGED)
    dim = (128, 128)
    img_tensor = cv2.resize(image, dim, interpolation=cv2.INTER_CUBIC)
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor = img_tensor / 255.

    def preprocess(img):
        # Add a leading batch axis: (h, w, c) -> (1, h, w, c).
        img = tf.keras.preprocessing.image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        return img

    image_1 = preprocess(img_tensor)
    predict = model.predict(image_1)
    target_class = np.argmax(predict[0])
    print("Target Class = %d" % target_class)

    # Auxiliary model exposing both the chosen conv layer's activations
    # and the final predictions, so gradients can flow to the activations.
    last_conv = model.get_layer(layer_name)
    heatmap_model = models.Model([model.inputs], [last_conv.output, model.output])
    with tf.GradientTape() as tape:
        conv_output, predictions = heatmap_model(image_1)
        # Score of the predicted class drives the gradients.
        loss = predictions[:, np.argmax(predictions[0])]
    # Fix: dropped the dead `preds = model(image_1)` call that ran a second
    # full forward pass whose result was never used.
    grads = tape.gradient(loss, conv_output)
    # Channel importance: gradients averaged over batch/height/width.
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_output), axis=-1)
    # ReLU then normalise to [0, 1]; guard an all-zero map.
    heatmap = np.maximum(heatmap, 0)
    max_heat = np.max(heatmap)
    if max_heat == 0:
        max_heat = 1e-10
    heatmap /= max_heat
    # NOTE(review): hard-codes an 8x8 conv map and passes 3 as tf.reshape's
    # third (name) argument -- confirm both against the model architecture.
    heatmap = reshape(heatmap, (8, 8), 3)
    plt.imshow(heatmap)
    # Upsample the heatmap back to the original image size
    # (shape[1] is the width / x-axis, shape[0] the height / y-axis).
    heatmap = np.expand_dims(heatmap, axis=-1)
    upsample = cv2.resize(heatmap, (image[:, :, 1].shape[1], image[:, :, 1].shape[0]), 3)
    plt.imshow(image)
    plt.imshow(upsample, alpha=0.5)
    plt.show()
| [
"noreply@github.com"
] | Tobias-Frenger.noreply@github.com |
7607c9acf511d475ac0caeea957dd878d0a57ef1 | b0d55e80bbdbfbfde5f5149f76936df2376342cf | /kiva/models/__init__.py | 6334e2ed7141a79b4e30b2d92c5c6eb332f6a43a | [] | no_license | matin/kiva_data | 144750f721f9aa43d7c88e255604c22fb3184f44 | 8ec992c31581140f2d5b7e5febea64bbeecf0eb2 | refs/heads/master | 2021-01-01T16:41:40.502494 | 2017-09-03T01:40:22 | 2017-09-03T01:40:22 | 97,891,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from .loans import Loan
from .loan_lenders import LoanLender
from .partners import Partner
| [
"mtamizi@gmail.com"
] | mtamizi@gmail.com |
0724e45aeed7460e471511187c5b3248453b5daa | 39ec72533c1914e6c3671f00392e7ee920a860ec | /data/snippets/pillow_text_alpha.py | e0c8b3e31b9558d15a6dede4bf485f3403f8a5a5 | [] | no_license | koo5/hackery2 | 6e7ce0e78088c64215e13281791b929c71b715e8 | 197306197eefc68f61c8cca25211430cf1c7e06c | refs/heads/master | 2023-08-29T21:34:10.010240 | 2023-08-19T21:48:38 | 2023-08-19T21:48:38 | 99,683,020 | 5 | 1 | null | 2021-08-12T23:49:24 | 2017-08-08T10:58:02 | Tcl | UTF-8 | Python | false | false | 702 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
# get an image
# Load the background image and give it an alpha channel.
base = Image.open('/home/koom/Untitled.png').convert('RGBA')

# Blank, fully transparent layer the text is drawn onto.
txt = Image.new('RGBA', base.size, (255, 255, 255, 0))

# get a font
fnt = ImageFont.truetype('FreeMono.ttf', 40)

# Fix: draw on the text layer. The original immediately rebound the draw
# context to `base`, so the alpha text never landed on `txt`.
d = ImageDraw.Draw(txt)

# draw text, ~11% opacity
d.text((10, 10), "Hello", font=fnt, fill=(255, 255, 255, 28))
# draw text, full opacity
d.text((10, 60), "World", font=fnt, fill=(255, 255, 255, 255))

# Fix: composite the background with the text layer once. The original
# overwrote `out` with alpha_composite(base, base), discarding the text.
out = Image.alpha_composite(base, txt)
out.show()
| [
"kolman.jindrich@gmail.com"
] | kolman.jindrich@gmail.com |
a578e9df112d8212f39e3e751254ec4e1957cceb | 99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6 | /algorithm/day21/순열2.py | 0a6081a5a78fa25bfd7c44e27f558a5b94a4ee49 | [] | no_license | HSx3/TIL | 92acc90758015c2e31660617bd927f7f100f5f64 | 981c9aaaf09c930d980205f68a28f2fc8006efcb | refs/heads/master | 2020-04-11T21:13:36.239246 | 2019-05-08T08:18:03 | 2019-05-08T08:18:03 | 162,099,042 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | def myprint(n):
for i in range(n):
print("%d" % (a[i]), end=' ')
print()
def perm(n, k):
    """Print every permutation of a[0:n], positions below k held fixed.

    Swap-based depth-first recursion: each candidate element is swapped
    into slot k, the tail is permuted, then the swap is undone.
    """
    if n == k:
        myprint(n)
        return
    for j in range(k, n):
        a[j], a[k] = a[k], a[j]
        perm(n, k + 1)
        a[j], a[k] = a[k], a[j]
# Driver: permute the three-element sample list in place, printing each order.
a = [1, 2, 3]
perm(3, 0)
"hs.ssafy@gmail.com"
] | hs.ssafy@gmail.com |
d2241a7063991c45c1f4f3a3e514a0b2f94a7d4f | eb73cc75bcda7a26784674c09a1cd14227889547 | /CIFAR10.py | 5aea00b3633ff54bb2692c2cdbe4a567b4071af0 | [] | no_license | junTaniguchi/python | 232fc43b8650b4168264120fba1b0f686ada042f | 09ca809bee9a96ff0a79e84f827afd9256a1f15a | refs/heads/master | 2021-01-22T05:28:23.793408 | 2017-03-25T12:44:54 | 2017-03-25T12:44:54 | 81,666,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,868 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 19:22:24 2017
@author: JunTaniguchi
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Fix both RNG seeds so sampling and graph results are reproducible.
np.random.seed(20160704)
tf.set_random_seed(20160704)
#データファイルから画像イメージとラベルデータを読み取る関数を用意する。
def read_cifar10(filename_queue):
    """Read one CIFAR-10 record (1 label byte + 32x32x3 image bytes) from the queue.

    Returns an object with .key, .label (int32 tensor), .uint8image
    (height x width x depth uint8 tensor) and the layout constants
    .height/.width/.depth.  Legacy TF 1.x queue-based input pipeline.
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    # Fixed on-disk layout of a single record.
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes
    # Reader that emits fixed-length records from the queued files.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    # Returns the next (key, value) record produced by the reader.
    result.key, value = reader.read(filename_queue)
    # Reinterpret the raw string bytes as a vector of uint8 numbers.
    record_bytes = tf.decode_raw(value, tf.uint8)
    # tf.slice(input_, begin, size, name=None)
    # extracts a tensor of `size` elements from `input_` starting at `begin`.
    result.label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
    depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                             [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
# For each of the 10 labels, display 8 sample images from the test batch.
sess = tf.InteractiveSession()
filename = '/Users/JunTaniguchi/Desktop/pythonPlayGround/study_tensorflow/cifar10_data/cifar-10-batches-bin/test_batch.bin'
# tf.FIFOQueue(capacity, dtypes, shapes) — feed the single filename into a queue
# so the FixedLengthRecordReader inside read_cifar10 can consume it.
q = tf.FIFOQueue(99, [tf.string], shapes=())
q.enqueue([filename]).run(session=sess)
q.close().run(session=sess)
result = read_cifar10(q)
# samples[label] collects up to 8 decoded images per label.
samples = [[] for l in range(10)]
while(True):
    label, image = sess.run([result.label, result.uint8image])
    label = label[0]
    if len(samples[label]) < 8:
        samples[label].append(image)
    # Stop once every label has 8 samples.
    if all([len(samples[l]) >= 8 for l in range(10)]):
        break
# Render a 10x8 grid: one row per label, 8 samples per row.
fig = plt.figure(figsize=(8,10))
for l in range(10):
    for c in range(8):
        subplot = fig.add_subplot(10, 8, l*8+c+1)
        subplot.set_xticks([])
        subplot.set_yticks([])
        image = samples[l][c]
        subplot.imshow(image.astype(np.uint8))
sess.close()
# Prepare a function that generates preprocessed (distorted) image variants.
def distorted_samples(image):
    """Return 7 whitened 24x24 variants of ``image`` concatenated along axis 0.

    Variant 0 is a deterministic center crop; variants 1-6 are random
    crop / flip / brightness / contrast distortions, each followed by
    per-image whitening.
    """
    source = tf.cast(image, tf.float32)
    crop_w, crop_h = 24, 24
    whitened = []

    # Sample 0: centered crop-or-pad, then whitening.
    centered = tf.image.resize_image_with_crop_or_pad(source, crop_w, crop_h)
    whitened.append(tf.image.per_image_whitening(centered))

    # Samples 1-6: randomized distortions of the same source image.
    for _ in range(6):
        variant = tf.random_crop(source, [crop_h, crop_w, 3])
        variant = tf.image.random_flip_left_right(variant)
        variant = tf.image.random_brightness(variant, max_delta=63)
        variant = tf.image.random_contrast(variant, lower=0.2, upper=1.8)
        whitened.append(tf.image.per_image_whitening(variant))

    # Old TF 0.x concat signature: concat dimension comes first.
    return tf.concat(0, whitened)
# For each label, display the original image alongside its preprocessed variants.
sess = tf.InteractiveSession()
filename = '/Users/JunTaniguchi/Desktop/pythonPlayGround/study_tensorflow/cifar10_data/cifar-10-batches-bin/test_batch.bin'
q = tf.FIFOQueue(99, [tf.string], shapes=())
q.enqueue([filename]).run(session=sess)
q.close().run(session=sess)
result = read_cifar10(q)
fig = plt.figure(figsize=(8,10))
c = 0
# original[label] -> uint8 image; modified[label] -> 7 whitened 24x24 crops stacked on axis 0.
original = {}
modified = {}
# Keep reading records until one example of every label has been captured.
# NOTE(review): distorted_samples(...) is called inside the loop, so each
# iteration appears to add fresh ops to the graph — confirm; hoisting the call
# out of the loop would avoid graph growth.
while len(original.keys()) < 10:
    label, orig, dists = sess.run([result.label,
                                   result.uint8image,
                                   distorted_samples(result.uint8image)])
    label = label[0]
    if not label in original.keys():
        original[label] = orig
        modified[label] = dists
# Grid: per label, column 1 is the original, columns 2-8 the distorted variants.
for l in range(10):
    orig, dists = original[l], modified[l]
    c += 1
    subplot = fig.add_subplot(10, 8, c)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(orig.astype(np.uint8))
    for i in range(7):
        c += 1
        subplot = fig.add_subplot(10, 8, c)
        subplot.set_xticks([])
        subplot.set_yticks([])
        # Each variant occupies 24 rows of the stacked tensor.
        pos = i*24
        # Undo whitening roughly (scale + shift) so the variant is displayable.
        image = dists[pos:pos+24]*40+120
        subplot.imshow(image.astype(np.uint8))
sess.close()
| [
"noreply@github.com"
] | junTaniguchi.noreply@github.com |
77943a4d3e4d1148d94b9ad235dc96195e234ab2 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OTLModel/Datatypes/KlVerkeersregelaarVoltage.py | 34ad4ce0c129e30a204bef55ade1b07e3f23d16f | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 1,838 | py | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerkeersregelaarVoltage(KeuzelijstField):
    """Choice list (keuzelijst) of the voltages that occur for traffic-signal controllers."""
    # OTL enumeration metadata; objectUri/codelist point into the Flemish
    # wegenenverkeer (roads and traffic) concept scheme.  Runtime strings such as
    # `definition` deliberately stay in Dutch — they are data, not documentation.
    naam = 'KlVerkeersregelaarVoltage'
    label = 'Verkeersregelaar voltage'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlVerkeersregelaarVoltage'
    definition = 'Keuzelijst met de voorkomende voltages gebruikt voor verkeersregelaars.'
    status = 'ingebruik'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerkeersregelaarVoltage'
    # Allowed options: 230 V, 40 V and 42 V, all with status 'ingebruik' (in use).
    options = {
        '230': KeuzelijstWaarde(invulwaarde='230',
                                label='230',
                                status='ingebruik',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/230'),
        '40': KeuzelijstWaarde(invulwaarde='40',
                               label='40',
                               status='ingebruik',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/40'),
        '42': KeuzelijstWaarde(invulwaarde='42',
                               label='42',
                               status='ingebruik',
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarVoltage/42')
    }

    @classmethod
    def create_dummy_data(cls):
        # Return the invulwaarde (fill-in value) of a randomly chosen option
        # whose status is 'ingebruik' (in use).
        return random.choice(list(map(lambda x: x.invulwaarde,
                                      filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.