| seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39842661876 | import gzip
import json
import os
import sys
from itertools import islice
from typing import Dict, List, Tuple
import git # pip3 install gitpython # https://gitpython.readthedocs.io/en/stable/intro.html
# Used if no path is specified in the command line arguments
DEFAULT_REPO_PATH = './linux.git'
def chunks(data, size):
"""
    Yield successive chunks of at most `size` items from data.
"""
iterator = iter(data)
for first in iterator:
yield [first] + list(islice(iterator, size - 1))
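# Example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]].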
def write_gzip_file(file_path, data):
"""
Write data to a gzip file.
"""
with gzip.open(file_path, 'wt') as f:
json.dump(data, f)
print(f"Successfully wrote to {file_path}")
def get_tags_json(tags) -> Tuple[List[Dict], Dict]:
"""
Construct JSON objects for tags.
Args:
        tags (Iterable[git.TagReference] or Iterable[git.Head]): An iterable of tags or branches.
    Returns: Tuple[List[Dict], Dict]: A tuple of the list of tags/branches in JSON format, and a dictionary mapping
        commit hexshas to their respective tag/branch JSON objects.
"""
tags_json = []
tag_id_to_tag_map = {}
for tag in tags:
try:
tag_json = {
"name": tag.name,
"commitId": tag.commit.hexsha
}
tag_id_to_tag_map[tag.commit.hexsha] = tag_json
tags_json.append(tag_json)
        except Exception as e:
            print(f"Error while parsing tag/branch {tag}, ignoring: {e}")
return tags_json, tag_id_to_tag_map
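# Example of the returned shape (illustrative; hexshas abbreviated):
# get_tags_json(repo.tags) ->
#   ([{"name": "v1.0", "commitId": "ab12..."}],
#    {"ab12...": {"name": "v1.0", "commitId": "ab12..."}})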
def get_commits_json(repo: git.Repo, branch_id_to_branch_map: Dict, tag_id_to_tag_map: Dict) -> List[Dict]:
"""
Construct JSON objects for commits.
"""
commits_json = []
for commit in repo.iter_commits('--all'):
commit_json = {
"summary": commit.summary,
"committed_date": commit.committed_date,
"author_name": commit.author.name,
"author_email": commit.author.email,
"committer_name": commit.committer.name,
"committer_email": commit.committer.email,
"commit_id": commit.hexsha,
"parent_commits": [parent.hexsha for parent in commit.parents]
}
if commit.hexsha in tag_id_to_tag_map:
tag_json = tag_id_to_tag_map[commit.hexsha]
commit_json["tag"] = tag_json["name"]
tag_json["commitMessage"] = commit.summary
tag_json["commitTime"] = commit.committed_date
if commit.hexsha in branch_id_to_branch_map:
branch_json = branch_id_to_branch_map[commit.hexsha]
commit_json["branch"] = branch_json["name"]
branch_json["commitMessage"] = commit.summary
branch_json["commitTime"] = commit.committed_date
commits_json.append(commit_json)
commits_json.sort(key=lambda x: x['committed_date'], reverse=True)
return commits_json
def add_date_order(repo_json):
"""
    Add commit date order to branches and tags.
    Commits are sorted by date; each tag and branch gets a 'dateOrder' field equal to the index of its commit in the commits list.
"""
commits = repo_json["commits"]
for tag in repo_json["tags"]:
tag["dateOrder"] = next((i for i, commit in enumerate(commits) if commit["commit_id"] == tag["commitId"]), None)
for branch in repo_json["branches"]:
branch["dateOrder"] = next((i for i, commit in enumerate(commits) if commit["commit_id"] == branch["commitId"]), None)
def get_repo_json(repo: git.Repo) -> Dict:
"""
Construct JSON object for repository.
Returns:
Dict: A dictionary containing branches, tags, and commits in JSON format.
"""
repo_json = {}
repo_json["branches"], branch_id_to_branch_map = get_tags_json(repo.branches)
repo_json["tags"], tag_id_to_tag_map = get_tags_json(repo.tags)
repo_json["commits"] = get_commits_json(repo, branch_id_to_branch_map, tag_id_to_tag_map)
print("Number of commits: ", len(repo_json["commits"]))
print("Number of branches: ", len(repo_json["branches"]))
print("Number of tags: ", len(repo_json["tags"]))
add_date_order(repo_json)
return repo_json
def write_chunked(repo_path, repo_json, CHUNK_SIZE=10000):
branches_and_tags_json = {
"branches": repo_json["branches"],
"tags": repo_json["tags"]
}
    os.makedirs(f'{repo_path}_chunked', exist_ok=True)
repo_path = f'{repo_path}_chunked/{repo_path}'
branches_and_tags_json_path = f'{repo_path}_branches_and_tags.json.gz'
write_gzip_file(branches_and_tags_json_path, branches_and_tags_json)
# Save commits to separate files
commit_chunks = list(chunks(repo_json["commits"], CHUNK_SIZE))
for i, chunk in enumerate(commit_chunks):
commits_json_path = f'{repo_path}_commits_{i + 1}.json.gz'
write_gzip_file(commits_json_path, chunk)
def write_output(repo_path, repo_json, chunked=False):
if chunked:
write_chunked(repo_path, repo_json)
else:
write_gzip_file(repo_path + '.json.gz', repo_json)
def main(repo_path: str, chunked=False):
"""
Main execution function.
Args:
repo_path (str): The path to the git repository.
"""
repo = git.Repo(repo_path)
repo_json = get_repo_json(repo)
write_output(repo_path, repo_json, chunked=chunked)
if __name__ == '__main__':
repo_path = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_REPO_PATH
main(repo_path)
| GaspardIV/gitvision | tool/tool.py | tool.py | py | 5,468 | python | en | code | 0 | github-code | 50 |
29169938932 | import logging
import unittest
import os
from genedescriptions.commons import Module
from genedescriptions.config_parser import GenedescConfigParser
from genedescriptions.data_manager import DataManager, DataType
from genedescriptions.descriptions_generator import OntologySentenceGenerator
from genedescriptions.gene_description import GeneDescription
logger = logging.getLogger("Gene Ontology Module tests")
class TestDescriptionsGenerator(unittest.TestCase):
def setUp(self):
logger.info("Starting Ontology Tools tests")
self.this_dir = os.path.split(__file__)[0]
self.conf_parser = GenedescConfigParser(os.path.join(self.this_dir, os.path.pardir, "tests", "config_test.yml"))
self.df = DataManager(do_relations=None, go_relations=["subClassOf", "BFO:0000050"])
logger.info("Loading go ontology from file")
logging.basicConfig(filename=None, level="ERROR", format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
self.df.load_ontology_from_file(ontology_type=DataType.GO, ontology_url="file://" + os.path.join(
self.this_dir, "data", "go_gd_test.obo"),
ontology_cache_path=os.path.join(self.this_dir, "cache", "go_gd_test.obo"),
config=self.conf_parser)
logger.info("Loading go associations from file")
self.df.load_associations_from_file(associations_type=DataType.GO, associations_url="file://" + os.path.join(
self.this_dir, "data", "gene_association_1.7.fb.partial"),
associations_cache_path=os.path.join(self.this_dir, "cache",
"gene_association_1.7.fb.partial"),
config=self.conf_parser)
logging.basicConfig(filename=None, level="INFO", format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
def test_set_or_extend_module_description_and_final_stats(self):
gene_desc = GeneDescription(gene_id="FB:FBgn0027655", gene_name="Test gene", add_gene_name=False,
config=self.conf_parser)
go_sent_generator = OntologySentenceGenerator(gene_id="FB:FBgn0027655", module=Module.GO,
data_manager=self.df, config=self.conf_parser)
sentences = go_sent_generator.get_module_sentences(aspect='P', qualifier='', merge_groups_with_same_prefix=True,
keep_only_best_group=True)
gene_desc.set_or_extend_module_description_and_final_stats(module=Module.GO_PROCESS, module_sentences=sentences)
        self.assertEqual(gene_desc.description, "Is involved in several processes, including axo-dendritic transport, "
                                                "establishment of mitotic spindle orientation, and positive regulation "
                                                "of extent of heterochromatin assembly")
gene_desc = GeneDescription(gene_id="FB:FBgn0027655", gene_name="Test gene", add_gene_name=True,
config=self.conf_parser)
gene_desc.set_or_extend_module_description_and_final_stats(module=Module.GO_PROCESS, module_sentences=sentences)
        self.assertEqual(gene_desc.description, "Test gene is involved in several processes, including axo-dendritic "
                                                "transport, establishment of mitotic spindle orientation, and positive "
                                                "regulation of extent of heterochromatin assembly")
| alliance-genome/agr_genedescriptions | tests/test_gene_description.py | test_gene_description.py | py | 3,693 | python | en | code | 3 | github-code | 50 |
16174387565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
- Name: pforencoder.py
- Description: encodes/decodes batches of integers to/from PFor
  (NewPFor/OptPFor).
- Author: Agustín González
- Modified: 30/05/18
Note: algorithm based on "Performance of Compressed Inverted List Caching
in Search Engines" by Zhang, Long and Suel, and on the kamikaze implementation
from the GitHub repositories of @javasoze and @lemire: http://bit.ly/2EMv3YP.
Basic operation:
Let N be a list of numbers:
1. Choose a bit width b that covers most of the numbers n in N (90%).
2. Each n in N encoded with b bits is stored in a list C. Two cases can occur:
   1. n fits in b bits => n is encoded and we proceed with n+1.
   2. n does not fit in b bits => its low b bits are encoded and the computed
      element is appended to C. In addition, with I a list of indices
      referencing the position of n in N, and E the list of exceptions:
      1. The index of n relative to N is stored in I.
      2. The remaining high bits of n are stored in the list E.
      3. We proceed with n+1.
3. The lists I and E are merged into IE.
4. IE is compressed using S16.
5. The number b and the exception count are stored in H (32 bits).
6. encoded = [H] + C + IE
'''
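# Worked example of the scheme above (illustrative): with N = [1, 2, 3, 9] and
# b = 2 (which covers 1, 2 and 3), the value 9 (binary 1001) does not fit, so
# its low bits 01 go into C, its index 3 into I and its high bits 10 into E.
# The header H stores b and the exception count, and the final output is
# [H] + C + S16(I + E).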
import math
import time
try:
    # Relative imports.
    from . import simple16encoder
    from .bitutils import read_binary_from_iarray
    from .bitutils import write_binary_in_iarray
except ImportError:
    # Imports for 'direct' execution of the script.
    import simple16encoder
    from bitutils import read_binary_from_iarray, write_binary_in_iarray
# Possible values of 'b'.
POSSIBLES_B = [x for x in range(1, 33)]
# Read masks for the 'POSSIBLES_B' vector (note: a dict is considerably faster
# than computing the mask on the fly inside encode/decode).
# Example mask for reading 4 bits: (1 << 4) is 16 (10000 in binary), and
# 16-1 is 15, 1111, the appropriate mask.
MASKS = {x: (1 << x)-1 for x in range(0, 33)}
# Total header size (one integer for parameter b and the exception count).
HEADER_SIZE = 32
# Maximum size of b in the header (bits).
B_HEADER_SIZE = 5
def estimate_encoded_size(numbers, b):
    '''Estimates the encoded size for the given list and b. An exception size
    of 32 bits, i.e. uncompressed, is assumed (which means exceptions are
    heavily 'penalised'). Note: the encoded size is an estimate, not final.
    Args:
        numbers (int list): numbers to encode.
        b (int): number of bits to use per number.
    Returns:
        size (int): estimated compressed size.
    '''
    # Largest number representable with b bits.
    max_number = MASKS[b]
    # Size of header and slots.
    size = HEADER_SIZE + len(numbers)*b
    exception_count = 0
    for i in range(0, len(numbers)):
        if numbers[i] > max_number:
            exception_count += 1
    # Assume maximum size (32 bits) per exception.
    size += exception_count*32
    return size
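# Worked example (illustrative): for numbers = [1, 2, 3, 100] and b = 2 the
# largest representable value is 3, so only 100 is an exception:
#   size = 32 (header) + 4*2 (slots) + 1*32 (exception) = 72 bits.
# With b = 7 every value fits (max 127): size = 32 + 4*7 = 60 bits, so
# __find_optimal_b below would prefer b = 7 for this list.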
def __find_optimal_b(numbers):
    '''Finds the optimal b (number of bits to use per slot) based on the list
    of numbers passed as a parameter.
    Args:
        numbers (int list): numbers to encode.
    Returns:
        b (int): optimal number of bits to use for each element of the
            list of numbers to encode.
    '''
    optimal_b = POSSIBLES_B[0]
    # Estimated compressed size based on b.
    optimal_size = estimate_encoded_size(numbers, optimal_b)
    # Selection of the best b.
    for i in range(1, len(POSSIBLES_B)):
        current_b = POSSIBLES_B[i]
        # Size for the current b.
        current_size = estimate_encoded_size(numbers, current_b)
        # Is the computed b better than the best so far?
        if current_size < optimal_size:
            optimal_b = current_b
            optimal_size = current_size
    return optimal_b
def encode(numbers):
    '''Encodes a list of numbers to PFor (NewPFor).
    Args:
        numbers (int list): numbers to encode.
    Returns
        encoded(int list): encoded list of numbers.
    '''
    # Parameter b (max number of bits per element).
    b = __find_optimal_b(numbers)
    # Exception list (high bits of out-of-range numbers).
    exceptions = []
    # Indices of the block's numbers flagged as exceptions.
    exceptions_indexes = []
    # Size of encoded, computed as the quotient of the number count and the
    # number of available slots per integer.
    numbers_per_int = math.floor(32/b)
    encoded_size = int(math.ceil(len(numbers)/numbers_per_int))
    encoded = [0] * encoded_size
    # Offset (in bits) within encoded.
    offset = 0
    for i in range(0, len(numbers)):
        number = numbers[i]
        # If the number is larger than the maximum allowed...
        if number > MASKS[b]:
            # The number is an exception...
            # Store the index of the exception.
            exceptions_indexes.append(i)
            # Store the most significant bits in the exception list.
            upper = (number >> b)
            exceptions.append(upper)
            # Keep only the b least significant bits.
            number = number & MASKS[b]
        # Write into encoded.
        write_binary_in_iarray(encoded, offset, number, b)
        # Advance the offset by b.
        offset += b
    # Trim the redundant tail of encoded.
    # float(32), for Py2 compatibility.
    used_ints = int(math.ceil(offset/float(32)))
    encoded = encoded[:used_ints]
    # The exception count and b are stored together.
    # Note: b-1, since if b were 32 it could not be stored in 5 bits. This is
    # valid because the minimum value of b is 1, not 0 (no risk of b<0).
    encoded.insert(0, (b-1 << (32-B_HEADER_SIZE)) + len(exceptions))
    # encoded.insert(1, len(numbers))
    # Encode indices and exceptions with Simple16.
    exception_encode = simple16encoder.encode(exceptions_indexes + exceptions)
    # Final encoding (header + blocks + exceptions).
    encoded += exception_encode
    return encoded
def __get_header(encoded):
    '''Returns the header data of the given encoding.
    Args:
        encoded (int list): encoded numbers.
    Returns:
        bits (int): number of bits used per slot.
        exceptions_count (int): number of exceptions in the encoding.
    '''
    offset = 32 - B_HEADER_SIZE
    # Param b+1, since 1 is subtracted in encode.
    bits = (encoded[0] >> offset)+1
    exceptions_count = encoded[0] & MASKS[offset]
    return bits, exceptions_count
def __merge_exceptions(decoded, exceptions, b):
    '''Merges decoded numbers with their exceptions.
    Args:
        decoded (int list): decoded output, exceptions not yet applied.
        exceptions (int list): exception list (indices plus exceptions).
        b (int): bit slot size (used as the shift for exceptions).
    Returns:
        decoded (int list): merged list (decoded numbers).
    '''
    # Split the exception list into indices and exceptions.
    # middle = int(len(exceptions)/2)
    middle = len(exceptions) >> 1
    exceptions_indexes = exceptions[0:middle]
    exceptions = exceptions[middle:]
    for i in exceptions_indexes:
        exception = exceptions.pop(0) << b
        # Add the exception to the number referenced by the index.
        decoded[i] += exception
    return decoded
def decode(encoded, nums):
    '''Decodes a sequence of integers encoded with PFor (NewPFor).
    Args:
        encoded (int list): encoded numbers.
        nums (int): number of integers to decode.
    Returns:
        decoded (int list): decoded numbers.
    '''
    # Read the header.
    header = __get_header(encoded)
    b = header[0]
    exceptions_count = header[1]
    # Drop the header.
    encoded = encoded[1:]
    decoded = []
    # > 1st decoding phase: batch decode (faster).
    offset = 0
    for _ in range(0, nums):
        decoded.append(read_binary_from_iarray(encoded, offset, b))
        offset += b
    # > 2nd decoding phase (slower): exceptions.
    if exceptions_count > 0:
        ints_readed = int(math.ceil(offset/32))
        exceptions = encoded[ints_readed:]
        exceptions = simple16encoder.decode(exceptions)
        # Drop any spurious decoded 0s (x2 because both indices and
        # exceptions are stored). Note: n*2 = n << 1.
        exceptions = exceptions[0:(exceptions_count << 1)]
        decoded = __merge_exceptions(decoded, exceptions, b)
    return decoded
def main():
    '''Functional test of the encode and decode functions.'''
    print("Testing encode/decode of 1 million integers...")
    # upper = 1000000
    # numbers = sorted(set(list(random.sample(range(2, upper*2), upper))))
    numbers = list(range(1, 1000000))
    # numbers = [1, 2]
    # Encode
    # gaps = gapsencoder.encode(numbers)
    start = time.time()
    encoded = encode(numbers)
    end = time.time()
    encoded_time = end-start
    # Decode
    start = time.time()
    decoded = decode(encoded, len(numbers))
    end = time.time()
    decoded_time = end-start
    if numbers != decoded:
        print(numbers[-5:], decoded[-5:])
        print("WARNING: numbers != decoded.")
        return
    print("Encoded time: {0}".format(encoded_time))
    print("Decoded time: {0}".format(decoded_time))
if __name__ == '__main__':
main()
| gustingonzalez/ircodecs | pforencoder.py | pforencoder.py | py | 9,705 | python | es | code | 0 | github-code | 50 |
11822346824 | import re
import time
from jarvis.skills.skill import AssistantSkill
class WordSkills(AssistantSkill):
@classmethod
def spell_a_word(cls, voice_transcript, skill, **kwargs):
"""
        Spell a word letter by letter.
:param voice_transcript: string (e.g 'spell word animal')
:param skill: dict (e.g
"""
tags = cls.extract_tags(voice_transcript, skill['tags'])
for tag in tags:
reg_ex = re.search(tag + ' ([a-zA-Z]+)', voice_transcript)
try:
if reg_ex:
search_text = reg_ex.group(1)
for letter in search_text:
cls.response(letter)
time.sleep(2)
except Exception as e:
cls.console(error_log=e)
cls.response("I can't spell the word")
| ggeop/Python-ai-assistant | src/jarvis/jarvis/skills/collection/text.py | text.py | py | 868 | python | en | code | 775 | github-code | 50 |
21416810205 | """
Variable Type 추가 시
1. UI variable_widget.ui에 추가
2. VariableListDialog _btnAddVariableClicked Method에 item으로 추가
3. DeclareVariableDialog popup에 추가
4. VariableWidget setComponent에 추가
5. VariableWidget getVariable에 추가
6. variable getValue에 추가
"""
import os
import re
import pickle
from PyQt5.QtCore import QDate
from PyQt5.QtWidgets import *
from PyQt5 import uic
from libs.variable import Variable
from utils.lib import makeVariableId
parentDir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
widget_class = uic.loadUiType(os.path.join(parentDir, "UI/variable_widget.ui"))[0]
class VariableWidget(QWidget, widget_class):
HOME = os.path.expanduser("~")
REF_SAVE_PATH = os.path.join(HOME, 'Test_Tool', 'ref', 'ref.ref_info')
def __init__(self, parent=None, case=None, option=True):
super(VariableWidget, self).__init__(parent)
self.setupUi(self)
self.target = None
self.case = case
self.variable = None
self.option = option
self._loadUiInit()
self.ref_data_list = []
self.sql_column_list = []
self.edt_variableId.textEdited.connect(self._variableIdTextEdited)
self.edt_variableId.editingFinished.connect(self._variableIdEditingFinished)
self.sb_refSubStrOption1.valueChanged.connect(self._subStrOptionChanged)
self.sb_refSubStrOption2.valueChanged.connect(self._subStrOptionChanged)
self.edt_refAddOption.textEdited.connect(self._refAddOptionTextEdited)
self.cb_oparamColumnId.currentIndexChanged['QString'].connect(self._cbOparamCurrentIndexChanged)
self.cb_refOption.currentIndexChanged['QString'].connect(self._cbRefOptionCurrentIndexChanged)
self.cb_excelFileNm.currentIndexChanged['QString'].connect(self._cbExcelFileNmCurrentIndexChanged)
self.cb_excelSheet.currentIndexChanged['QString'].connect(self._cbExcelSheetCurrentIndexChanged)
self.cb_excelColumnId.currentIndexChanged['QString'].connect(self._cbExcelColumnIdCurrentIndexChanged)
self.cb_sqlNm.currentIndexChanged['QString'].connect(self._cbSqlNmCurrentIndexChanged)
self.cb_sqlColumnId.currentIndexChanged['QString'].connect(self._cbSqlColumnIdCurrentIndexChanged)
self.edt_refEvalOption.textChanged.connect(self._refEvalOptionTextChanged)
def _loadUiInit(self):
self.edt_refId.hide()
self.edt_variableId.setStyleSheet("color: rgb(244, 164, 96);")
self.de_designatedDate.setDate(QDate.currentDate())
self.setRefOptionEnable(self.option)
def _variableIdTextEdited(self, variable_id):
if variable_id:
if variable_id[0] != '$' or variable_id[-1] != '$' or variable_id.find('$$') > -1:
self.edt_variableId.setStyleSheet("color: rgb(255, 0, 0);")
else:
self.edt_variableId.setStyleSheet("color: rgb(244, 164, 96);")
def _variableIdEditingFinished(self):
variable_id = makeVariableId(self.edt_variableId.text())
self.edt_variableId.setText(variable_id)
self._variableIdTextEdited(variable_id)
def _subStrOptionChanged(self):
value = self.variable.getValue()
self.setDataListValue(value)
def _refAddOptionTextEdited(self):
value = self.variable.getValue()
self.setDataListValue(value)
def _cbOparamCurrentIndexChanged(self, item):
self.getOparamInfo(item)
def getOparamInfo(self, column_id):
if self.variable is None:
svc_combo_name = self.getSvcComboNm()
svc_combo_index = next(idx for idx, refDataInfo in enumerate(self.ref_data_list) if refDataInfo.get('name') == svc_combo_name)
ref_data = self.ref_data_list[svc_combo_index]
variable_id = ref_data.get('{}_variable_id'.format(column_id))
variable_desc = ref_data.get('{}_desc'.format(column_id))
self.setVariableId(variable_id)
self.setDesc(variable_desc)
def _cbRefOptionCurrentIndexChanged(self, item):
if item is not None:
value = self.variable.getValue()
self.setDataListValue(value)
def _refEvalOptionTextChanged(self):
if self.getType() == 'Data List':
self.setDataListValue()
elif self.getType() == 'SVC COMBO (Swing Only)':
self.setSvcComboValue()
def _cbExcelFileNmCurrentIndexChanged(self, item):
if item:
self.cb_excelSheet.clear()
self.cb_excelColumnId.clear()
try:
file_index = next(idx for idx, refDataInfo in enumerate(self.ref_data_list) if refDataInfo.get('name') == item)
ref_file = self.ref_data_list[file_index]
sheet_list = list(sheetData['sheet'] for idx, sheetData in enumerate(ref_file.get('data')))
self.cb_excelSheet.addItems(sheet_list)
self.lbl_excelFileNmDesc.setText("")
except StopIteration:
self.lbl_excelFileNmDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
def _cbExcelSheetCurrentIndexChanged(self, item):
file_name = self.getExcelFileNm()
if item and file_name:
self.cb_excelColumnId.clear()
try:
file_index = next(idx for idx, refDataInfo in enumerate(self.ref_data_list) if refDataInfo.get('name') == file_name)
ref_file = self.ref_data_list[file_index]
sheet_index = next(idx for idx, sheetData in enumerate(ref_file.get('data')) if sheetData['sheet'] == item)
ref_sheet = ref_file.get('data')[sheet_index]
column_list = ref_sheet['columns']
self.cb_excelColumnId.addItems(column_list)
self.lbl_excelSheetDesc.setText("")
except StopIteration:
self.lbl_excelSheetDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
def _cbExcelColumnIdCurrentIndexChanged(self, item):
file_name = self.getExcelFileNm()
sheet = self.getExcelSheet()
if item and file_name:
try:
file_index = next(idx for idx, refDataInfo in enumerate(self.ref_data_list) if refDataInfo.get('name') == file_name)
ref_file = self.ref_data_list[file_index]
sheet_index = next(idx for idx, sheetData in enumerate(ref_file.get('data')) if sheetData['sheet'] == sheet)
ref_sheet = ref_file.get('data')[sheet_index]
column_list = ref_sheet['columns']
if item in column_list:
self.lbl_excelColumnIdDesc.setText("")
else:
self.lbl_excelColumnIdDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
except StopIteration:
self.lbl_excelColumnIdDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
def _cbSqlNmCurrentIndexChanged(self, item):
if item:
ref_data = self.getRefData('name', item, 'SQL')
query = ref_data.get('query')
self.sql_column_list = query.getOutputInfo()
self.edt_refId.setText(ref_data.getId())
self.setSqlColumnId(column_list=self.sql_column_list)
def _cbSqlColumnIdCurrentIndexChanged(self, item):
if item:
column_index = next(idx for idx, column_info in enumerate(self.sql_column_list) if column_info.get('column') == item)
selected_column_info = self.sql_column_list[column_index]
self.setVariableId(selected_column_info['variable_id'])
self.setDesc(selected_column_info['comment'])
def init(self):
self.cb_variableType.setCurrentIndex(0)
self.edt_variableId.setText('')
self.edt_targetNm.setText('')
self.edt_targetId.setText('')
self.edt_subId.setText('')
self.sb_rowIndex.setValue(0)
self.edt_columnId.setText('')
self.edt_descripton.setText('')
self.edt_dataListValue.setText('')
def setType(self, variable_type):
combo_idx = self.cb_variableType.findText(variable_type)
self.cb_variableType.setCurrentIndex(combo_idx)
def setTarget(self, target):
self.target = target
def setVariableId(self, variable_id):
self.edt_variableId.setText(variable_id)
def setTargetNm(self, target_nm):
self.edt_targetNm.setText(target_nm)
def setTargetId(self, target_id):
self.edt_targetId.setText(target_id)
def setSubId(self, sub_id):
self.edt_subId.setText(sub_id)
def setRowIndex(self, row_index):
if type(row_index) == int:
self.sw_rowIndex.setCurrentIndex(0)
method_combo_idx = self.cb_rowMethod.findText("Fix")
self.cb_rowMethod.setCurrentIndex(method_combo_idx)
self.sb_rowIndex.setValue(row_index)
self.edt_rowIndexColumnValue.setText("")
else:
self.sw_rowIndex.setCurrentIndex(1)
method_combo_idx = self.cb_rowMethod.findText("By Value")
self.cb_rowMethod.setCurrentIndex(method_combo_idx)
row_index_column_id = row_index["column_id"]
row_index_value = row_index["value"]
row_index_column_id_idx = self.cb_rowIndexColumnId.findText(row_index_column_id)
self.cb_rowIndexColumnId.setCurrentIndex(row_index_column_id_idx)
self.edt_rowIndexColumnValue.setText(row_index_value)
def setRowIndexColumnId(self, column_id_list):
self.cb_rowIndexColumnId.clear()
self.cb_rowIndexColumnId.addItems(column_id_list)
def setColumnId(self, column_id):
self.edt_columnId.setText(column_id)
def setDesc(self, description):
self.edt_descripton.setText(description)
def setDataListValue(self, value=''):
value = self.applyRefOption(value)
self.edt_dataListValue.setText(str(value))
def applyRefOption(self, value=''):
ref_option = self.getRefOption()
if ref_option == 'Substr':
value = self.variable.getValue()
start_index = self.sb_refSubStrOption1.value()
end_index = self.sb_refSubStrOption2.value()
value = value[start_index:end_index]
elif ref_option == 'Sum':
value = self.variable.getValue()
try:
value = int(value)
addOption = self.getRefAddOption()
option_variables = [x.strip() for x in addOption.split(',')]
for variable_id in option_variables:
variable = self.case.getVariable(variable_id)
if variable is None:
pass
else:
try:
add_value = int(variable.getValue())
value += add_value
except ValueError:
pass
except ValueError:
value = self.variable.getValue()
elif ref_option == 'Eval':
evalOption = self.getRefEvalOption()
try:
find_variables = re.findall(r"(?:[^\$]+[\$$])", evalOption, flags=re.MULTILINE | re.DOTALL)
if find_variables:
for tmp_variable_id in find_variables:
variable_id = makeVariableId(tmp_variable_id)
variable = self.case.getVariable(variable_id)
if variable:
value = variable.getValue()
evalOption = evalOption.replace(variable_id, "'{}'".format(value))
# print(evalOption)
value = eval(evalOption)
except NameError:
value = ''
except SyntaxError:
value = ''
except TypeError:
value = ''
return value
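    # Illustrative ref-option behaviour (example values are assumptions): with
    # 'Substr | 0, 4' a stored value '20230115' becomes '2023'; with
    # 'Sum | $A$, $B$' the integer values of variables $A$ and $B$ are added to
    # the base value; 'Eval' substitutes $...$ variables into a Python
    # expression and evaluates it.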
def setDateFormat(self, format):
format_combo_idx = self.cb_dateFormat.findText(format)
self.cb_dateFormat.setCurrentIndex(format_combo_idx)
def setDateOption(self, date_option):
date_option_combo_idx = self.cb_dateOption.findText(date_option)
self.cb_dateOption.setCurrentIndex(date_option_combo_idx)
def setDesignatedDate(self, designated_date):
if designated_date:
date = QDate.fromString(designated_date, "yyyyMMdd")
self.de_designatedDate.setDate(date)
def setDateValue(self, value):
self.edt_dateValue.setText(value)
def setExcelFileNm(self, file_nm):
file_nm_combo_idx = self.cb_excelFileNm.findText(file_nm)
if file_nm_combo_idx > -1:
self.lbl_excelFileNmDesc.setText('')
else:
self.cb_excelFileNm.insertItem(0, file_nm)
self.cb_excelFileNm.insertSeparator(1)
self.lbl_excelFileNmDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
file_nm_combo_idx = 0
self.cb_excelFileNm.setCurrentIndex(file_nm_combo_idx)
def setExcelSheet(self, sheet):
sheet_combo_idx = self.cb_excelSheet.findText(sheet)
if sheet_combo_idx > -1:
self.lbl_excelSheetDesc.setText('')
else:
self.cb_excelSheet.insertItem(0, sheet)
self.cb_excelSheet.insertSeparator(1)
self.lbl_excelSheetDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
sheet_combo_idx = 0
self.cb_excelSheet.setCurrentIndex(sheet_combo_idx)
def setExcelColumnId(self, column_id):
column_id_combo_idx = self.cb_excelColumnId.findText(column_id)
if column_id_combo_idx > -1:
self.lbl_excelColumnIdDesc.setText('')
else:
self.cb_excelColumnId.insertItem(0, column_id)
self.cb_excelColumnId.insertSeparator(1)
self.lbl_excelColumnIdDesc.setText("<h4><font color='{color}'> {text} </font></h4>".format(color='Red', text='Does not exist'))
column_id_combo_idx = 0
self.cb_excelColumnId.setCurrentIndex(column_id_combo_idx)
def setExcelValue(self, value):
self.edt_excelValue.setText(value)
def setFixedValue(self, value):
self.edt_fixedValue.setText(value)
def setRefOptionEnable(self, enabled):
if enabled:
self.lbl_refOption.show()
self.cb_refOption.show()
self.sw_refOptionDtl.show()
else:
self.lbl_refOption.hide()
self.cb_refOption.hide()
self.sw_refOptionDtl.hide()
def setRefOptionInfo(self, ref_option):
if ref_option:
ref_option_split = [x.strip() for x in ref_option.split('|')]
ref_option_type = ref_option_split[0]
ref_option_info = ref_option_split[1]
ref_option_combo_idx = self.cb_refOption.findText(ref_option_type)
self.cb_refOption.setCurrentIndex(ref_option_combo_idx)
if ref_option_type == 'Substr':
index_list = [x.strip() for x in ref_option_info.split(',')]
start_index = int(index_list[0])
end_index = int(index_list[1])
self.sb_refSubStrOption1.setValue(start_index)
self.sb_refSubStrOption2.setValue(end_index)
elif ref_option_type == 'Sum':
self.edt_refAddOption.setText(ref_option_info)
elif ref_option_type == 'Eval':
self.edt_refEvalOption.setText(ref_option_info)
def setOparamRowIndex(self, row_index):
if type(row_index) == int:
self.sw_oparamRowIndex.setCurrentIndex(0)
method_combo_idx = self.cb_oparamRowMethod.findText("Fix")
self.cb_oparamRowMethod.setCurrentIndex(method_combo_idx)
self.sb_oparamRowIndex.setValue(row_index)
self.edt_oparamRowIndexColumnValue.setText("")
else:
self.sw_oparamRowIndex.setCurrentIndex(1)
method_combo_idx = self.cb_rowMethod.findText("By Value")
self.cb_oparamRowMethod.setCurrentIndex(method_combo_idx)
row_index_column_id = row_index["column_id"]
row_index_value = row_index["value"]
row_index_column_id_idx = self.cb_oparamRowIndexColumnId.findText(row_index_column_id)
self.cb_oparamRowIndexColumnId.setCurrentIndex(row_index_column_id_idx)
self.edt_oparamRowIndexColumnValue.setText(row_index_value)
def setSvcComboValue(self, value=''):
value = self.applyRefOption(value)
self.edt_svcComboValue.setText(str(value))
def setSqlValue(self, value=''):
value = self.applyRefOption(value)
self.edt_sqlValue.setText(str(value))
def setSqlColumnId(self, column_list):
column_id_list = []
for column_info in column_list:
column_id_list.append(column_info['column'])
self.cb_sqlColumnId.clear()
self.cb_sqlColumnId.addItems(column_id_list)
self.cb_sqlRowIndexColumnId.clear()
self.cb_sqlRowIndexColumnId.addItems(column_id_list)
def getRefData(self, key, value, ref_type):
        '''
        Returns the ref_data from the reference data whose value for `key`
        equals the given value.
        :param key: (str) 'name'
        :param value: (str) '개인/내국인조회'
        :param ref_type: (str) 'SQL'
        :return: (class) reference
        '''
if key == 'id':
index = next(idx for idx, ref_data_info in enumerate(self.ref_data_list) if ref_data_info.getId() == value and ref_data_info.getType() == ref_type)
else:
index = next(idx for idx, ref_data_info in enumerate(self.ref_data_list) if ref_data_info.get(key) == value and ref_data_info.getType() == ref_type)
ref_data = self.ref_data_list[index]
return ref_data
# ============================ Excel ============================
def setRefExcelInfo(self):
        '''
        Sets up the Excel reference info:
        populates the combobox values so a file can be selected.
        '''
self.ref_data_list = self.case.getRefData()
file_name_list = list(ref_data_info.get('name') for idx, ref_data_info in enumerate(self.ref_data_list) if ref_data_info.getType() == 'Excel')
self.cb_excelFileNm.clear()
self.cb_excelFileNm.addItems(file_name_list)
    def typeSetEnabled(self, enabled):
        self.cb_variableType.setEnabled(enabled)
def getType(self):
return self.cb_variableType.currentText()
def getVariableId(self):
return self.edt_variableId.text()
def getTarget(self):
return self.edt_target.text()
def getSubId(self):
return self.edt_subId.text()
def getRowIndex(self):
return self.sb_rowIndex.value()
def getColumnId(self):
return self.edt_columnId.text()
def getRowMethod(self):
return self.cb_rowMethod.currentText()
def getRowIndexColumnId(self):
return self.cb_rowIndexColumnId.currentText()
def getRowIndexValue(self):
return self.edt_rowIndexColumnValue.text()
def getDesc(self):
return self.edt_descripton.text()
def getRefOption(self):
return self.cb_refOption.currentText()
def getRefAddOption(self):
return self.edt_refAddOption.text()
def getRefEvalOption(self):
return self.edt_refEvalOption.text()
def getDataListValue(self):
return self.edt_dataListValue.text()
def getRefOptionInfo(self):
ref_option_info = ''
#if self.variable.variable_type == 'Data List':
ref_option = self.getRefOption()
if ref_option == 'Substr':
start_index = self.sb_refSubStrOption1.value()
end_index = self.sb_refSubStrOption2.value()
ref_option_info = '{ref_option} | {start_index}, {end_index}'.format(ref_option=ref_option, start_index=start_index, end_index=end_index)
elif ref_option == 'Sum':
add_option = self.getRefAddOption()
ref_option_info = '{ref_option} | {add_option}'.format(ref_option=ref_option, add_option=add_option)
elif ref_option == 'Eval':
eval_option = self.getRefEvalOption()
ref_option_info = '{ref_option} | {eval_option}'.format(ref_option=ref_option, eval_option=eval_option)
return ref_option_info
def getDateFormat(self):
return self.cb_dateFormat.currentText()
def getDateOption(self):
return self.cb_dateOption.currentText()
def getDesignatedDate(self):
designated_date = self.de_designatedDate.date()
return designated_date.toString("yyyyMMdd")
def getExcelFileNm(self):
return self.cb_excelFileNm.currentText()
def getExcelSheet(self):
return self.cb_excelSheet.currentText()
def getExcelColumnId(self):
return self.cb_excelColumnId.currentText()
def getfixedValue(self):
return self.edt_fixedValue.text()
# ============================ SVC COMBO (Swing Only) ============================
def setRefSvcComboInfo(self):
        '''
        Sets up the SVC COMBO reference info:
        populates the combobox values so a SVC COMBO can be selected.
        '''
self.ref_data_list = self.case.getRefData()
svc_combo_name_list = list(ref_data_info.get('name') for idx, ref_data_info in enumerate(self.ref_data_list) if ref_data_info.getType() == 'SVC COMBO (Swing Only)')
self.cb_svcComboNm.clear()
self.cb_svcComboNm.addItems(svc_combo_name_list)
self.getOparamInfo('oparam1')
def getSvcComboNm(self):
return self.cb_svcComboNm.currentText()
def getIparam1(self):
return self.edt_iparam1.text()
def getIparam2(self):
return self.edt_iparam2.text()
def getIparam3(self):
return self.edt_iparam3.text()
def getIparam4(self):
return self.edt_iparam4.text()
def getIparam5(self):
return self.edt_iparam5.text()
def getOparamColumnId(self):
return self.cb_oparamColumnId.currentText()
def getOparamRowMethod(self):
return self.cb_oparamRowMethod.currentText()
def getOparamRowIndex(self):
return self.sb_oparamRowIndex.value()
def getOparamRowIndexColumnId(self):
return self.cb_oparamRowIndexColumnId.currentText()
def getOparamRowIndexColumnValue(self):
return self.edt_oparamRowIndexColumnValue.text()
# ============================ SQL ============================
def setRefSqlInfo(self):
        '''
        Sets up the SQL reference info:
        populates the combobox values so a SQL can be selected.
        '''
self.ref_data_list = self.case.getRefData()
sql_name_list = list(ref_data_info.get('name') for idx, ref_data_info in enumerate(self.ref_data_list) if ref_data_info.getType() == 'SQL')
self.cb_sqlNm.clear()
self.cb_sqlNm.addItems(sql_name_list)
def setSqlRowIndex(self, row_index):
if type(row_index) == int:
self.sw_sqlRowIndex.setCurrentIndex(0)
method_combo_idx = self.cb_sqlRowMethod.findText("Fix")
self.cb_sqlRowMethod.setCurrentIndex(method_combo_idx)
self.sb_sqlRowIndex.setValue(row_index)
self.edt_sqlRowIndexColumnValue.setText("")
else:
self.sw_sqlRowIndex.setCurrentIndex(1)
method_combo_idx = self.cb_sqlRowMethod.findText("By Value")
self.cb_sqlRowMethod.setCurrentIndex(method_combo_idx)
row_index_column_id = row_index["column_id"]
row_index_value = row_index["value"]
row_index_column_id_idx = self.cb_sqlRowIndexColumnId.findText(row_index_column_id)
self.cb_sqlRowIndexColumnId.setCurrentIndex(row_index_column_id_idx)
self.edt_sqlRowIndexColumnValue.setText(row_index_value)
def getRefId(self):
return self.edt_refId.text()
def getVariable(self):
        '''
        Returns a Variable (class) built from the current input values.
        :return: (Class) Variable
        '''
var = None
variable_type = self.getType()
variable_id = self.getVariableId()
desc = self.getDesc()
if variable_type == 'Data List':
sub_id = self.getSubId()
row_method = self.getRowMethod()
column_id = self.getColumnId()
if row_method == 'Fix':
row_index = self.getRowIndex()
else:
row_index_column_id = self.getRowIndexColumnId()
row_index_value = self.getRowIndexValue()
row_index = {'column_id': row_index_column_id, 'value': row_index_value}
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['target'] = self.target
var['sub_id'] = sub_id
var['row_index'] = row_index
var['column_id'] = column_id
elif variable_type == 'Date':
date_option = self.getDateOption()
format = self.getDateFormat()
if date_option == "Today":
designated_date = ""
elif date_option == "Designated date":
designated_date = self.getDesignatedDate()
else:
designated_date = ""
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['date_option'] = date_option
var['designated_date'] = designated_date
var['format'] = format
elif variable_type == 'Excel':
file_nm = self.getExcelFileNm()
sheet = self.getExcelSheet()
column_id = self.getExcelColumnId()
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['file_nm'] = file_nm
var['sheet'] = sheet
var['column_id'] = column_id
elif variable_type == 'SQL':
ref_sql_id = self.getRefId()
column_id = self.cb_sqlColumnId.currentText()
row_method = self.cb_sqlRowMethod.currentText()
if row_method == 'Fix':
row_index = self.sb_sqlRowIndex.value()
else:
row_index_column_id = self.cb_sqlRowIndexColumnId.currentText()
row_index_value = self.edt_sqlRowIndexColumnValue.text()
row_index = {'column_id': row_index_column_id, 'value': row_index_value}
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['ref_sql_id'] = ref_sql_id
var['column_id'] = column_id
var['row_index'] = row_index
elif variable_type == 'Fixed Value':
value = self.getfixedValue()
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['value'] = value
elif variable_type == 'SVC COMBO (Swing Only)':
svc_combo_nm = self.getSvcComboNm()
column_id = self.getOparamColumnId()
row_method = self.getOparamRowMethod()
if row_method == 'Fix':
row_index = self.getOparamRowIndex()
else:
row_index_column_id = self.getOparamRowIndexColumnId()
row_index_value = self.getOparamRowIndexColumnValue()
row_index = {'column_id': row_index_column_id, 'value': row_index_value}
var = Variable(case=self.case, variable_type=variable_type, variable_id=variable_id)
var['description'] = desc
var['svc_combo_nm'] = svc_combo_nm
var['column_id'] = column_id
var['row_index'] = row_index
return var
def setComponent(self, variable):
self.variable = variable
self.setType(variable.variable_type)
self.setVariableId(variable.variable_id)
self.setDesc(variable.get('description'))
if variable.variable_type == 'Data List':
target_nm = variable.get('target').get('target')
target_id = variable.get('target').id
column_id_list = variable.get('target').getColumnList(variable.get('sub_id'))
self.setTarget(variable.get('target'))
self.setTargetNm(target_nm)
self.setTargetId(target_id)
self.setSubId(variable.get('sub_id'))
self.setRowIndexColumnId(column_id_list)
self.setRowIndex(variable.get('row_index'))
self.setColumnId(variable.get('column_id'))
self.setDataListValue(variable.getValue())
self.typeSetEnabled(False)
elif variable.variable_type == 'Date':
self.setDateOption(variable.get('date_option'))
self.setDesignatedDate(variable.get('designated_date'))
self.setDateFormat(variable.get('format'))
self.setDateValue(variable.getValue())
elif variable.variable_type == 'Excel':
self.setRefExcelInfo()
self.setExcelFileNm(variable.get('file_nm'))
self.setExcelSheet(variable.get('sheet'))
self.setExcelColumnId(variable.get('column_id'))
try:
value = variable.getValue()
            except Exception:
value = ''
self.setExcelValue(value)
elif variable.variable_type == 'SQL':
self.setRefSqlInfo()
ref_sql_id = variable.get('ref_sql_id')
ref_data = self.getRefData('id', ref_sql_id, 'SQL')
sql_nm = ref_data.get('name')
sql_nm_idx = self.cb_sqlNm.findText(sql_nm)
self.cb_sqlNm.setCurrentIndex(sql_nm_idx)
column_combo_idx = self.cb_sqlColumnId.findText(variable.get('column_id'))
self.cb_sqlColumnId.setCurrentIndex(column_combo_idx)
self.setSqlRowIndex(variable.get('row_index'))
self.setSqlValue(variable.getValue())
elif variable.variable_type == 'Fixed Value':
self.setFixedValue(variable.get('value'))
elif variable.variable_type == 'SVC COMBO (Swing Only)':
self.setRefSvcComboInfo()
svc_combo_nm_idx = self.cb_svcComboNm.findText(variable.get('svc_combo_nm'))
self.cb_svcComboNm.setCurrentIndex(svc_combo_nm_idx)
column_combo_idx = self.cb_oparamColumnId.findText(variable.get('column_id'))
self.cb_oparamColumnId.setCurrentIndex(column_combo_idx)
self.setOparamRowIndex(variable.get('row_index'))
self.setSvcComboValue(variable.getValue())
| jasonbaek97/test_tool | widgets/variableWidget.py | variableWidget.py | py | 31,317 | python | en | code | 0 | github-code | 50 |
15551055354 | """
Выполнение HTTP-запроса с помощью транспортного механизма и протокола
"""
import asyncio
from asyncio import AbstractEventLoop, Future, Transport
from typing import Optional
class HTTPGetClientProtocol(asyncio.Protocol):
def __init__(self, host: str, loop: AbstractEventLoop):
self._host: str = host
self._future: Future = loop.create_future()
self._transport: Optional[Transport] = None
self._response_buffer: bytes = b""
async def get_response(self):
# await until we get response from the server:
return await self._future
def _get_request_bytes(self) -> bytes:
request = (
f"GET / HTTP/1.1\r\n" f"Connection: close\r\n" f"Host: {self._host}\r\n\r\n"
)
return request.encode()
def connection_made(self, transport: Transport) -> None:
print(f"Got connection to {self._host}")
self._transport = transport
# When connection is established, use transport to send request:
self._transport.write(self._get_request_bytes())
def data_received(self, data):
print("Data received!")
# Store received data in internal buffer
self._response_buffer += data
def eof_received(self) -> Optional[bool]:
self._future.set_result(self._response_buffer.decode())
return False
def connection_lost(self, exc: Optional[Exception]) -> None:
if exc is None:
print("Connection closed without errors.")
else:
self._future.set_exception(exc)
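

# Illustrative driver (not part of the original listing; host and port are
# assumptions): open a TCP connection using this protocol and await the body.
async def main():
    loop = asyncio.get_running_loop()
    host = "www.example.com"
    protocol = HTTPGetClientProtocol(host, loop)
    # create_connection wires the transport to our protocol instance.
    await loop.create_connection(lambda: protocol, host=host, port=80)
    print(await protocol.get_response())


if __name__ == "__main__":
    asyncio.run(main())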
| hazadus/asyncio-learn | ch8/listing_8_1.py | listing_8_1.py | py | 1,608 | python | en | code | 0 | github-code | 50 |
22226720670 | from tkinter import messagebox
import tkinter as tk
import requests
from threading import Thread
import pyperclip
api = "http://api.quotable.io/random"
quotes=[]
quote_number=0
n=0
window= tk.Tk()
window.geometry("1100x500")
window.title("Quote Generator")
window.grid_columnconfigure(0, weight=1)
window.resizable(True,True)
window.configure(bg="grey")
def copy_text():
pyperclip.copy(quotes[quote_number-1])
tk.messagebox.showinfo("Success", "Text copied to clipboard!")
def Preload_Quotes():
global quotes
print("♪♪♪ Loading Quotes ♪♪♪")
for x in range(10):
random_quote= requests.get(api).json()
content= random_quote["content"]
author= random_quote["author"]
quote=content + "\n\n" + "By " + author
quotes.append(quote)
Preload_Quotes()
def get_random_quote():
global quote_label
global quotes
global quote_number
quote_label.configure(text=quotes[quote_number])
quote_number= quote_number+1
print(quote_number)
    if quote_number >= len(quotes) - 3:  # near the end of the preloaded quotes: fetch more
thread = Thread(target=Preload_Quotes)
thread.start()
quote_label=tk.Label(window, text="Click on the button to generate a Quote",
height=9,
padx=15,
wraplength=900,
font=("Poppins",14))
quote_label.grid(row=0, column=0, stick="WE", padx=20, pady=10)
button = tk.Button(text="Generate", command=get_random_quote, bg='#66347F', fg='#ffffff', activebackground="#66347F", font=("Poppins",14))
button.grid(row=1, column=0, padx=5, pady=5)
button = tk.Button(text="Copy text", command=copy_text, bg='#ffffff', fg='#66347F', activebackground="#ffffff", font=("Poppins",14))
button.grid(row=2, column=0, padx=10, pady=10)
if __name__ == "__main__":
window.mainloop()
| Attafii/Quote_Generator | quote.py | quote.py | py | 1,964 | python | en | code | 1 | github-code | 50 |
10038860604 | import yaml
config = {
'site_url': None, # if has site_url, use absolute url
'site_dir': 'site',
'permalink': ':year:/:month:/:day:/:title:.html',
'template_dir': None,
'site_name': 'Your Site Name',
'author': 'Your Name'
}
def load_config(config_file, config):
with open(config_file, 'r') as fh:
        config_fromfile = yaml.safe_load(fh)
for key in config_fromfile:
if key in config and config_fromfile[key]:
                config[key] = config_fromfile[key]
| tye42/mdsite | mdsite/utils/configparser.py | configparser.py | py | 498 | python | en | code | 0 | github-code | 50 |
# Use case: when there are multiple request parameters
# import urllib.parse
#
# data = {
# 'wd':'周杰伦',
# 'sex':'男',
# 'location':'中国台湾省'
# }
#
# a = urllib.parse.urlencode(data)
# print(a)
# Fetch the page source
import urllib.request
import urllib.parse
base_url = 'https://www.baidu.com/s?'
data = {
'wd':'王力宏',
'sex':'男',
'location':'中国台湾省'
}
new_data = urllib.parse.urlencode(data)
# Resource request URL
url = base_url + new_data
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'
}
request = urllib.request.Request(url=url,headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
| lyy82/pythonPaChong | 058_爬虫_urllib_get请求的urlencode方法.py | 058_爬虫_urllib_get请求的urlencode方法.py | py | 791 | python | en | code | 0 | github-code | 50 |
14437260850 | from copy import copy
import os
from shutil import copyfile
from syscore.dateutils import create_datetime_marker_string
from syscore.fileutils import get_resolved_pathname, files_with_extension_in_pathname
from syscore.objects import (
resolve_function,
)
from syscore.constants import arg_not_supplied, success, failure
from syscore.interactive.menus import print_menu_of_values_and_get_response
from sysdata.config.production_config import get_production_config
from sysdata.data_blob import dataBlob
from sysobjects.production.backtest_storage import interactiveBacktest
from sysproduction.data.generic_production_data import productionDataLayerGeneric
from sysproduction.data.strategies import (
get_valid_strategy_name_from_user,
diagStrategiesConfig,
)
PICKLE_EXT = ".pck"
CONFIG_EXT = ".yaml"
PICKLE_FILE_SUFFIX = "_backtest"
CONFIG_FILE_SUFFIX = "_config"
PICKLE_SUFFIX = PICKLE_FILE_SUFFIX + PICKLE_EXT
CONFIG_SUFFIX = CONFIG_FILE_SUFFIX + CONFIG_EXT
def user_choose_backtest(data: dataBlob = arg_not_supplied) -> interactiveBacktest:
(
strategy_name,
timestamp,
) = interactively_choose_strategy_name_timestamp_for_backtest(data)
data_backtest = dataBacktest(data=data)
backtest = data_backtest.load_backtest(
strategy_name=strategy_name, timestamp=timestamp
)
return backtest
def interactively_choose_strategy_name_timestamp_for_backtest(
data: dataBlob = arg_not_supplied,
) -> (str, str):
strategy_name = get_valid_strategy_name_from_user(data=data)
timestamp = interactively_choose_timestamp(data=data, strategy_name=strategy_name)
return strategy_name, timestamp
def interactively_choose_timestamp(
strategy_name: str, data: dataBlob = arg_not_supplied
):
data_backtest = dataBacktest(data)
list_of_timestamps = sorted(
data_backtest.get_list_of_timestamps_for_strategy(strategy_name)
)
# most recent last
print("Choose the backtest to load:\n")
timestamp = print_menu_of_values_and_get_response(
list_of_timestamps, default_str=list_of_timestamps[-1]
)
return timestamp
class dataBacktest(productionDataLayerGeneric):
def get_most_recent_backtest(self, strategy_name: str) -> interactiveBacktest:
list_of_timestamps = sorted(
self.get_list_of_timestamps_for_strategy(strategy_name)
)
# most recent last
timestamp_to_use = list_of_timestamps[-1]
backtest = self.load_backtest(strategy_name, timestamp_to_use)
return backtest
def load_backtest(self, strategy_name: str, timestamp: str) -> interactiveBacktest:
system = create_system_with_saved_state(self.data, strategy_name, timestamp)
backtest = interactiveBacktest(
system=system, strategy_name=strategy_name, timestamp=timestamp
)
return backtest
def get_list_of_timestamps_for_strategy(self, strategy_name):
timestamp_list = get_list_of_timestamps_for_strategy(strategy_name)
return timestamp_list
def get_list_of_timestamps_for_strategy(strategy_name):
list_of_files = get_list_of_pickle_files_for_strategy(strategy_name)
list_of_timestamps = [
rchop(file_name, PICKLE_FILE_SUFFIX) for file_name in list_of_files
]
return list_of_timestamps
def create_system_with_saved_state(data, strategy_name, date_time_signature):
"""
    :param data: dataBlob
:param strategy_name: str
:param date_time_signature: str
:return: system
"""
system_caller = get_system_caller(data, strategy_name, date_time_signature)
system = system_caller()
system = load_backtest_state(system, strategy_name, date_time_signature)
return system
def get_system_caller(data, strategy_name, date_time_signature):
# returns a method we can use to recreate a system
strategy_loader_config_original = (
get_strategy_class_backtest_loader_config_without_warning(
data=data, strategy_name=strategy_name
)
)
    ## When popping, it is best to copy first
strategy_loader_config = copy(strategy_loader_config_original)
strategy_class_object = resolve_function(strategy_loader_config.pop("object"))
function = strategy_loader_config.pop("function")
config_filename = get_backtest_config_filename(strategy_name, date_time_signature)
strategy_class_instance = strategy_class_object(
data, strategy_name, backtest_config_filename=config_filename
)
method = getattr(strategy_class_instance, function)
return method
def get_loader_config(data: dataBlob, strategy_name: str) -> dict:
try:
strategy_loader_config = (
get_strategy_class_backtest_loader_config_without_warning(
data, strategy_name
)
)
except BaseException:
strategy_loader_config = dict(
object="sysproduction.strategy_code.run_system_classic.runSystemClassic",
function="system_method",
)
data.log.warning(
"No configuration strategy_list/strategy_name/load_backtests; using defaults %s"
% str(strategy_loader_config)
)
return strategy_loader_config
def get_strategy_class_backtest_loader_config_without_warning(data, strategy_name):
diag_strategy_config = diagStrategiesConfig(data)
strategy_loader_config = (
diag_strategy_config.get_strategy_config_dict_for_specific_process(
strategy_name, "load_backtests"
)
)
return strategy_loader_config
def load_backtest_state(system, strategy_name, date_time_signature):
"""
Given a system, recover the saved state
:param system: a system object whose config is compatible
:param strategy_name: str
:return: system with cache filled from pickled backtest state file
"""
filename = get_backtest_pickle_filename(strategy_name, date_time_signature)
system.cache.unpickle(filename)
return system
def store_backtest_state(data, system, strategy_name="default_strategy"):
"""
Store a pickled backtest state and backtest config for a system
:param data: data object, used to access the log
:param system: a system object which has run
:param strategy_name: str
:return: success
"""
ensure_backtest_directory_exists(strategy_name)
datetime_marker = create_datetime_marker_string()
pickle_filename = get_backtest_pickle_filename(strategy_name, datetime_marker)
pickle_state(data, system, pickle_filename)
config_save_filename = get_backtest_config_filename(strategy_name, datetime_marker)
system.config.save(config_save_filename)
return success
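

# Illustrative effect (paths follow the examples further down this module): a
# run of strategy "medium_speed_TF_carry" stamped 20200616_122543 writes
#   .../backtests/medium_speed_TF_carry/20200616_122543_backtest.pck
#   .../backtests/medium_speed_TF_carry/20200616_122543_config.yaml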
def ensure_backtest_directory_exists(strategy_name):
full_directory = get_backtest_directory_for_strategy(strategy_name)
try:
os.makedirs(full_directory)
except FileExistsError:
pass
def rchop(s, suffix):
if suffix and s.endswith(suffix):
return s[: -len(suffix)]
return None
def get_list_of_pickle_files_for_strategy(strategy_name):
full_directory = get_backtest_directory_for_strategy(strategy_name)
list_of_files = files_with_extension_in_pathname(full_directory, PICKLE_EXT)
return list_of_files
def get_backtest_pickle_filename(strategy_name, datetime_marker):
# eg
# '/home/rob/data/backtests/medium_speed_TF_carry/20200616_122543_backtest.pck'
prefix = get_backtest_filename_prefix(strategy_name, datetime_marker)
suffix = PICKLE_SUFFIX
return prefix + suffix
def get_backtest_config_filename(strategy_name, datetime_marker):
# eg
# '/home/rob/data/backtests/medium_speed_TF_carry/20200616_122543_config.yaml'
prefix = get_backtest_filename_prefix(strategy_name, datetime_marker)
suffix = CONFIG_SUFFIX
return prefix + suffix
def get_backtest_filename_prefix(strategy_name, datetime_marker):
# eg '/home/rob/data/backtests/medium_speed_TF_carry/20200622_102913'
full_directory = get_backtest_directory_for_strategy(strategy_name)
full_filename_prefix = os.path.join(full_directory, datetime_marker)
return full_filename_prefix
def get_backtest_directory_for_strategy(strategy_name):
# eg '/home/rob/data/backtests/medium_speed_TF_carry'
directory_store_backtests = get_directory_store_backtests()
directory_store_backtests = get_resolved_pathname(directory_store_backtests)
full_directory = os.path.join(directory_store_backtests, strategy_name)
return full_directory
def get_directory_store_backtests():
# eg '/home/rob/data/backtests/'
production_config = get_production_config()
store_directory = production_config.get_element("backtest_store_directory")
return store_directory
def pickle_state(data, system, backtest_filename):
try:
system.cache.pickle(backtest_filename)
data.log.debug("Pickled backtest state to %s" % backtest_filename)
return success
except Exception as e:
data.log.warning(
"Couldn't save backtest state to %s error %s" % (backtest_filename, e)
)
return failure
def copy_config_file(data, resolved_backtest_config_filename, config_save_filename):
try:
copyfile(resolved_backtest_config_filename, config_save_filename)
data.log.debug(
"Copied config file from %s to %s"
% (resolved_backtest_config_filename, config_save_filename)
)
return success
except Exception as e:
data.log.warning(
"Couldn't copy config file from %s to %s error %s"
% (resolved_backtest_config_filename, config_save_filename, e)
)
return failure
| robcarver17/pysystemtrade | sysproduction/data/backtest.py | backtest.py | py | 9,848 | python | en | code | 2,180 | github-code | 50 |
40792864032 | import itertools
import collections
from inlinetesting.TestingAtoms import assert_equal, AssuranceError, AlternativeAssertionError, summon_cactus
from inlinetesting.TestingBasics import assure_raises_instanceof
class ProvisionError(Exception):
pass
class MysteriousError(Exception):
""" don't catch this. Just identify its cause and replace it with a better exception. and then maybe catch it. """
pass
def take_first_and_iter(input_seq):
inputGen = iter(input_seq)
try:
first = next(inputGen)
except StopIteration:
raise ProvisionError("could not take first item.")
return (first, inputGen)
assure_raises_instanceof(take_first_and_iter, ProvisionError)([])
assert take_first_and_iter(range(2, 10))[0] == 2
def assert_empty(input_seq):
inputGen = iter(input_seq)
try:
first = next(inputGen)
except StopIteration:
return
assert False, "input_seq was not empty, first item was {}.".format(repr(first))
assert_empty((item for item in []))
"""
try:
assert_empty([5])
raise AlternativeAssertionError() # just because it is never caught, but this isn't its purpose.
except AssertionError:
pass
"""
assure_raises_instanceof(assert_empty, AssertionError)([5])
def wrap_with(input_fun, wrapper):
""" this is helpful in testing whether a generator eventually raises an error. """
def wrap_with_inner(*args, **kwargs):
return wrapper(input_fun(*args, **kwargs))
return wrap_with_inner
assert_equal(wrap_with(sum, (lambda x: x**2))([1,2,3]), 36)
testZip = zip("ab","cd")
izip_shortest = (zip if (iter(testZip) is iter(testZip)) else itertools.izip)
testZip2 = izip_shortest("ab","cd")
assert (iter(testZip2) is iter(testZip2)) and (not isinstance(testZip2, list)), "can't izip?"
del testZip, testZip2
try:
izip_longest = itertools.izip_longest
except AttributeError:
izip_longest = itertools.zip_longest
"""
def izip_uniform(*input_seqs):
raise NotImplementedError("doesn't work!")
inputGens = [iter(inputSeq) for inputSeq in input_seqs]
outputGen = izip_shortest(*inputGens)
for item in outputGen:
yield item
failData = set()
for i,inputGen in enumerate(inputGens):
try:
assert_empty(inputGen)
except AssertionError:
failData.add(i)
if len(failData)> 0:
raise AssuranceError("The following seq(s) were not empty: {}.".format(failData))
"""
"""
def get_next_of_each(input_gens):
try:
return tuple(next(inputGen) for inputGen in inputGens)
except StopIteration:
raise
"""
def izip_uniform(*input_seqs):
inputGens = list(map(iter, input_seqs))
currentBucket = []
for itemIndex in itertools.count():
currentBucket.clear()
for inputGenIndex, inputGen in enumerate(inputGens):
try:
currentBucket.append(next(inputGen))
except StopIteration:
if inputGenIndex == 0:
for genIndexB, genB in enumerate(inputGens):
try:
assert_empty(genB)
except AssertionError:
raise AssuranceError(f"the generators did not run out of items all at the same time, at item index {itemIndex}.") from None
return # they all ran out at the same time.
else:
raise AssuranceError(f"generator at index {inputGenIndex} had no item at index {itemIndex}!")
# continue to next gen.
yield tuple(currentBucket)
assert False
assert_equal(list(izip_uniform("abcdefg", [1,2,3,4,5,6,7])), list(zip("abcdefg", [1,2,3,4,5,6,7])))
assure_raises_instanceof(wrap_with(izip_uniform, list), AssuranceError)("abcdefg", [1,2,3,4,5,6])
def izip_uniform_containers(*input_containers):
    if not all(hasattr(item, "__len__") for item in input_containers):
        raise TypeError(f"These containers don't all have __len__. Their types are {[type(item) for item in input_containers]}.")
    sharedLength = len(input_containers[0])
if not all(len(other)==sharedLength for other in input_containers[1:]):
raise AssuranceError(f"The items don't all have the same lengths. Their lengths are {[len(item) for item in input_containers]}.")
return izip_shortest(*input_containers)
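# Added inline checks (not in the original module), in this file's
# assert-based testing style: equal-length containers zip normally, while a
# length mismatch raises AssuranceError eagerly.
assert_equal(list(izip_uniform_containers([1, 2], [3, 4])), [(1, 3), (2, 4)])
assure_raises_instanceof(izip_uniform_containers, AssuranceError)([1, 2], [3])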
def gen_track_previous(input_seq):
previousItem = None
for item in input_seq:
yield (previousItem, item)
previousItem = item
assert (list(gen_track_previous(range(5,10))) == [(None,5),(5,6),(6,7),(7,8),(8,9)])
def gen_track_previous_full(input_seq, allow_waste=False):
try:
previousItem, inputGen = take_first_and_iter(input_seq)
except ProvisionError:
raise MysteriousError("can't fill, because there are no items.")
try:
currentItem = next(inputGen)
except StopIteration:
if allow_waste:
return
else:
raise MysteriousError("waste would happen, but is not allowed.")
yield (previousItem, currentItem)
previousItem = currentItem
for currentItem in inputGen:
yield (previousItem, currentItem)
previousItem = currentItem
assert_equal(list(gen_track_previous_full(range(5,10))), [(5,6), (6,7), (7,8), (8,9)])
assure_raises_instanceof(wrap_with(gen_track_previous_full, list), MysteriousError)([5])
assure_raises_instanceof(wrap_with(gen_track_previous_full, list), MysteriousError)([])
def gen_track_recent(input_seq, count=None, default=None):
history = collections.deque([default for i in range(count)])
for item in input_seq:
history.append(item)
history.popleft()
yield tuple(history)
assert list(gen_track_recent("abcdef", count=3, default=999)) == [(999, 999, "a"), (999, "a", "b"), ("a","b","c"), ("b","c","d"),("c","d","e"),("d","e","f")]
def gen_track_recent_trimmed(input_seq, count=None):
history = collections.deque([])
for item in input_seq:
history.append(item)
while len(history) > count:
history.popleft()
yield tuple(history)
assert_equal(list(gen_track_recent_trimmed("abcdef", count=3)), [("a",), ("a", "b"), ("a","b","c"), ("b","c","d"),("c","d","e"),("d","e","f")])
def gen_track_recent_full(input_seq, count=None, allow_waste=False):
assert count >= 2
leftSentinel = object()
result = gen_track_recent(input_seq, count=count, default=leftSentinel)
trash = tuple(leftSentinel for i in range(count))
while trash.count(leftSentinel) > 1:
try:
trash = next(result)
except StopIteration:
if allow_waste:
return ()
else:
raise MysteriousError(f"Not enough items to yield a full batch of {count} items.")
assert trash.count(leftSentinel) == 1
assert trash[0] is leftSentinel
return result
assert (list(gen_track_recent_full("abcdef", count=3)) == [("a","b","c"),("b","c","d"),("c","d","e"),("d","e","f")])
assert (list(gen_track_recent_full("abc", count=5, allow_waste=True)) == [])
assure_raises_instanceof(wrap_with(gen_track_recent_full, list), MysteriousError)("abc", count=5, allow_waste=False)
def enumerate_to_depth_unpacked(data, depth=None):
assert depth > 0
if depth == 1:
for pair in enumerate(data): # return can't be used because yield appears in other branch. This does NOT produce error messages in python 3.8.10.
yield pair
else:
assert depth > 1
for i, item in enumerate(data):
for longItem in enumerate_to_depth_unpacked(item, depth=depth-1):
yield (i,) + longItem
assert_equal(list(enumerate_to_depth_unpacked([5,6,7,8], depth=1)), [(0,5), (1,6), (2,7), (3,8)])
assert_equal(list(enumerate_to_depth_unpacked([[5,6],[7,8]], depth=2)), [(0,0,5), (0,1,6), (1,0,7), (1,1,8)])
def enumerate_to_depth_packed(data, depth=None):
assert depth > 0
if depth == 1:
for i, item in enumerate(data):
yield ((i,), item)
else:
assert depth > 1
for i, item in enumerate(data):
for subItemAddress, subItem, in enumerate_to_depth_packed(item, depth=depth-1):
yield ((i,)+subItemAddress, subItem)
assert_equal(list(enumerate_to_depth_packed([5,6,7,8], depth=1)), [((0,),5), ((1,),6), ((2,),7), ((3,),8)])
assert_equal(list(enumerate_to_depth_packed([[5,6],[7,8]], depth=2)), [((0,0),5), ((0,1),6), ((1,0),7), ((1,1),8)])
def iterate_to_depth(data, depth=None):
assert depth > 0
if depth == 1:
for item in data: # return can't be used because yield appears in other branch. This does NOT produce error messages in python 3.8.10.
yield item
else:
assert depth > 1
for item in data:
for subItem in iterate_to_depth(item, depth=depth-1):
yield subItem
assert_equal(list(iterate_to_depth([[2,3], [4,5], [[6,7], 8, [9,10]]], depth=2)), [2,3,4,5,[6,7],8,[9,10]])
def gen_chunks_as_lists(data, length, *, allow_partial=True):
itemGen = iter(data)
while True:
chunk = list(itertools.islice(itemGen, 0, length))
if len(chunk) == 0:
assert_empty(itemGen)
return
elif len(chunk) == length:
yield chunk
else:
assert 0 < len(chunk) < length
assert_empty(itemGen)
if not allow_partial:
raise AssuranceError("The last chunk was partial. It contained {} of the required {} items.".format(len(chunk), length))
yield chunk
return
assert False
assert list(gen_chunks_as_lists(range(9), 2)) == [[0,1], [2,3], [4,5], [6,7], [8]]
assert list(gen_chunks_as_lists(range(8), 2)) == [[0,1], [2,3], [4,5], [6,7]]
assure_raises_instanceof(wrap_with(gen_chunks_as_lists, list), AssuranceError)(range(9), 2, allow_partial=False)
def get_next_assuredly_available(input_gen, *, too_few_exception=None):
try:
result = next(input_gen)
except StopIteration:
if too_few_exception is not None:
raise too_few_exception from None
else:
raise AssuranceError("no next item was available!") from None
return result
assert_equal(get_next_assuredly_available(iter(range(2,5))), 2)
assure_raises_instanceof(get_next_assuredly_available, AssuranceError)(iter(range(0)))
def get_next_assuredly_last(input_gen, *, too_few_exception=None, too_many_exception=None):
result = get_next_assuredly_available(input_gen, too_few_exception=too_few_exception)
try:
assert_empty(input_gen)
except AssertionError as ate:
if too_many_exception is not None:
raise too_many_exception from None
else:
raise AssuranceError(f"more items remained. assert_empty says: {ate}") from None
return result
assure_raises_instanceof(get_next_assuredly_last, AssuranceError)(iter(range(5)))
assure_raises_instanceof(get_next_assuredly_last, AssuranceError)(iter(range(0)))
def yield_next_assuredly_last(input_gen, **kwargs):
yield get_next_assuredly_last(input_gen, **kwargs)
def assure_gen_length_is(input_gen, length):
assert length > 0
assert iter(input_gen) is iter(input_gen)
    return itertools.chain(itertools.islice(input_gen, length-1), yield_next_assuredly_last(input_gen))
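# Added inline checks (not in the original module), in this file's
# assert-based testing style; they exercise the corrected islice call above.
assert_equal(list(assure_gen_length_is(iter(range(3)), 3)), [0, 1, 2])
assure_raises_instanceof(wrap_with(assure_gen_length_is, list), AssuranceError)(iter(range(4)), 3)
assure_raises_instanceof(wrap_with(assure_gen_length_is, list), AssuranceError)(iter(range(2)), 3)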
def gen_assure_never_exhausted(input_seq):
i = -1
for i, item in enumerate(input_seq):
yield item
raise AssuranceError("input_seq was exhausted after {} items.".format(i+1))
def islice_assuredly_full(input_seq, *other_args, **other_kwargs):
"""
assert length >= 1
for i, item in enumerate(input_seq):
yield item
if i+1 == length:
return
raise AssuranceError("a full slice could not be made.")
"""
return itertools.islice(gen_assure_never_exhausted(input_seq), *other_args, **other_kwargs)
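# Added inline checks (not in the original module): a full slice succeeds,
# while an input that runs dry mid-slice raises AssuranceError.
assert_equal(list(islice_assuredly_full(range(10), 3)), [0, 1, 2])
assure_raises_instanceof(wrap_with(islice_assuredly_full, list), AssuranceError)(range(2), 3)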
| JohnDorsey/inlinetesting | PureGenTools.py | PureGenTools.py | py | 12,172 | python | en | code | 0 | github-code | 50 |
17859236671 | import pickle
import pylab as plt
import seaborn as sns
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC2':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]
with open(cwd / 'figures' / 'msi' / 'results' / 'latents_sum_new.pkl', 'rb') as f:
latents = pickle.load(f)
##choose one K fold
non_repeats = latents[0][0]
repeats = latents[0][1]
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
fig.subplots_adjust(top=0.975,
bottom=0.07,
left=0.11,
right=0.98,
hspace=0.14,
wspace=0.04)
sns.kdeplot(non_repeats.flatten(), shade=True, gridsize=300, ax=ax1, alpha=1)
sns.kdeplot(non_repeats.flatten(), shade=False, gridsize=300, ax=ax1, alpha=1, color='k', linewidth=1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_visible(False)
ax1.spines['bottom'].set_linewidth(1)
ax1.set_xticks([])
ax1.tick_params(axis='y', length=0, width=0, labelsize=8)
ax1.set_ylabel('Variant Density (thousand)', fontsize=10)
ax1.set_xlim(.0, .08)
ax1.set_title('Other', fontsize=12, loc='left', y=.87, x=.01)
sns.kdeplot(repeats.flatten(), shade=True, gridsize=300, ax=ax2, alpha=1)
sns.kdeplot(repeats.flatten(), shade=False, gridsize=300, ax=ax2, alpha=1, color='k', linewidth=1)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.spines['bottom'].set_linewidth(1)
ax2.set_xticks([])
ax2.tick_params(axis='y', length=0, width=0, labelsize=8)
ax2.set_xlabel('Attention', fontsize=12)
ax2.set_ylabel('Variant Density (thousand)', fontsize=10, labelpad=8)
ax2.set_xlim(.0, .08)
ax2.set_title('Simple Repeats', fontsize=12, loc='left', y=.87, x=.01)
fig.canvas.draw()
ax1.set_yticklabels([str(int(round(float(i.get_text())/100 * non_repeats.shape[0] / 1000, 0))) for i in ax1.get_yticklabels()])
ax2.set_yticklabels([str(int(round(float(i.get_text())/100 * repeats.shape[0] / 1000, 0))) for i in ax2.get_yticklabels()])
plt.savefig(cwd / 'figures' / 'msi' / 'kde.pdf')
| OmnesRes/ATGC | figures/msi/latent_figure.py | latent_figure.py | py | 2,038 | python | en | code | 3 | github-code | 50 |
12621785148 | """
Only going down
"""
import numpy as np
import pandas as pd
import platform
import os
os.chdir('../')
path = os.getcwd()
if platform.system() == 'Windows':
vnx = pd.read_csv(path + '\\data\\VNX.csv', usecols=["ticker"])
if platform.system() != 'Windows':
vnx = pd.read_csv(path + '/data/VNX.csv', usecols=["ticker"])
vnx_ticker = np.array(vnx)
total_buy = 0
total_sell = 0
for ticker in vnx_ticker:
    ticker_id = ticker[0]
    # only the "ticker" column is loaded above, so each row has a single
    # field; indexing ticker[1] for an exchange would raise IndexError
if platform.system() == 'Windows':
file = path + "\\data\\VNX\\" + ticker_id + '\\Price.csv'
if platform.system() != 'Windows':
file = path + '/data/VNX/' + ticker_id + '/Price.csv'
ticker_data = pd.read_csv(file, usecols=["close"])
price = np.array(ticker_data["close"])
    reversed_price = price[-4:-1] # the three closes preceding the most recent one
if reversed_price[0] > reversed_price[1] and reversed_price[1] > reversed_price[2]:
print(ticker_id)
| zuongthaotn/quant-trading-by-py | GoingDown/filter01.py | filter01.py | py | 942 | python | en | code | 0 | github-code | 50 |
75005510876 | from rest_framework import serializers
from main.models import Event, Category
from app_user.serializers import AppUserDetailsSerializer
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
read_only_fields = ('slug',)
class EventCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = '__all__'
class EventALLSerializer(serializers.ModelSerializer):
author = AppUserDetailsSerializer(read_only=True)
category = CategorySerializer(read_only=True)
class Meta:
model = Event
fields = [
'title',
'description',
'image',
'date',
'venue',
'slug',
'category',
'author',
'featured',
'event_url',
'community'
]
| Kenan7/corvento_backend | main/serializers.py | serializers.py | py | 902 | python | en | code | 1 | github-code | 50 |
70573332315 | import json
import os
import pickle
import random
import cv2 as cv
import torch
from torchvision import transforms
from config import device, im_size, pickle_file_aligned, train_ratio, IMG_DIR
from data_gen import data_transforms
from utils import idx2name
def save_images(full_path, filename, i):
raw = cv.imread(full_path)
resized = cv.resize(raw, (im_size, im_size))
cv.imwrite('images/{}_raw.jpg'.format(i), resized)
img = cv.imread(os.path.join(IMG_DIR, filename))
img = cv.resize(img, (im_size, im_size))
cv.imwrite('images/{}_img.jpg'.format(i), img)
if __name__ == "__main__":
with open(pickle_file_aligned, 'rb') as file:
data = pickle.load(file)
samples = data['samples']
num_samples = len(samples)
num_train = int(train_ratio * num_samples)
samples = samples[num_train:]
samples = random.sample(samples, 10)
inputs = torch.zeros([10, 3, im_size, im_size], dtype=torch.float, device=device)
transformer = data_transforms['valid']
sample_preds = []
for i, sample in enumerate(samples):
full_path = sample['full_path']
filename = sample['filename']
print(filename)
save_images(full_path, filename, i)
full_path = os.path.join(IMG_DIR, filename)
# full_path = sample['filename']
# bbox = sample['bboxes'][0]
img = cv.imread(full_path)
# img = crop_image(img, bbox)
        img = cv.resize(img, (im_size, im_size))
img = img[..., ::-1] # RGB
img = transforms.ToPILImage()(img)
img = transformer(img)
inputs[i] = img
age = sample['attr']['age']
pitch = sample['attr']['angle']['pitch']
roll = sample['attr']['angle']['roll']
yaw = sample['attr']['angle']['yaw']
beauty = sample['attr']['beauty']
expression = sample['attr']['expression']['type']
face_prob = sample['attr']['face_probability']
face_shape = sample['attr']['face_shape']['type']
face_type = sample['attr']['face_type']['type']
gender = sample['attr']['gender']['type']
glasses = sample['attr']['glasses']['type']
race = sample['attr']['race']['type']
sample_preds.append({'i': i, 'age_true': age,
'pitch_true': pitch,
'roll_true': roll,
'yaw_true': yaw,
'beauty_true': beauty,
'expression_true': expression,
'face_prob_true': face_prob,
'face_shape_true': face_shape,
'face_type_true': face_type,
'gender_true': gender,
'glasses_true': glasses,
'race_true': race})
checkpoint = 'BEST_checkpoint.tar'
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)
model.eval()
with torch.no_grad():
reg_out, expression_out, gender_out, glasses_out, race_out = model(inputs)
print(reg_out.size())
reg_out = reg_out.cpu().numpy()
age_out = reg_out[:, 0]
pitch_out = reg_out[:, 1]
roll_out = reg_out[:, 2]
yaw_out = reg_out[:, 3]
beauty_out = reg_out[:, 4]
_, expression_out = expression_out.topk(1, 1, True, True)
print('expression_out.size(): ' + str(expression_out.size()))
_, gender_out = gender_out.topk(1, 1, True, True)
print('gender_out.size(): ' + str(gender_out.size()))
_, glasses_out = glasses_out.topk(1, 1, True, True)
print('glasses_out.size(): ' + str(glasses_out.size()))
_, race_out = race_out.topk(1, 1, True, True)
print('race_out.size(): ' + str(race_out.size()))
expression_out = expression_out.cpu().numpy()
print('expression_out.shape: ' + str(expression_out.shape))
gender_out = gender_out.cpu().numpy()
print('gender_out.shape: ' + str(gender_out.shape))
glasses_out = glasses_out.cpu().numpy()
print('glasses_out.shape: ' + str(glasses_out.shape))
race_out = race_out.cpu().numpy()
print('race_out.shape: ' + str(race_out.shape))
for i in range(10):
sample = sample_preds[i]
sample['age_out'] = int(age_out[i] * 100)
sample['pitch_out'] = float('{0:.2f}'.format(pitch_out[i] * 360 - 180))
sample['roll_out'] = float('{0:.2f}'.format(roll_out[i] * 360 - 180))
sample['yaw_out'] = float('{0:.2f}'.format(yaw_out[i] * 360 - 180))
sample['beauty_out'] = float('{0:.2f}'.format(beauty_out[i] * 100))
sample['expression_out'] = idx2name(int(expression_out[i][0]), 'expression')
sample['gender_out'] = idx2name(int(gender_out[i][0]), 'gender')
sample['glasses_out'] = idx2name(int(glasses_out[i][0]), 'glasses')
sample['race_out'] = idx2name(int(race_out[i][0]), 'race')
with open('sample_preds.json', 'w') as file:
json.dump(sample_preds, file, indent=4, ensure_ascii=False)
| foamliu/Face-Attributes-Mobile | demo.py | demo.py | py | 5,080 | python | en | code | 40 | github-code | 50 |
74371008795 | from django import forms
from web.models import *
class NoticeForm(forms.Form):
target_type = forms.ChoiceField(
choices=(("channel", "Channel"), ("stream", "Stream"))
)
action = forms.ChoiceField(
choices=(("add", "Add"), ("remove", "Remove"))
)
target_id = forms.CharField(
required=True, max_length=24
)
notice_min = forms.IntegerField(required=False, max_value=1440, min_value=1) # Max: 24h, Min: 1m
def clean_notice_min(self):
if not self.cleaned_data.get("notice_min"):
self.cleaned_data["notice_min"] = 5
return self.cleaned_data.get("notice_min")
def clean_target_id(self):
target = self.cleaned_data.get("target_id")
target_type = self.cleaned_data.get("target_type")
if target_type == "channel":
if not Channel.objects.filter(channel_id=target):
raise forms.ValidationError("Invalid channel/video ID. or target is not registered.")
elif target_type == "stream":
if not Stream.objects.filter(video_id=target):
raise forms.ValidationError("Invalid channel/video ID. or target is not registered.")
return target
def clean_action(self):
action = self.cleaned_data.get("action")
if action not in ["add", "remove"]:
raise forms.ValidationError("Invalid parameter - action")
return action
def clean_target_type(self):
if self.cleaned_data.get("target_type") not in ["channel", "stream"]:
raise forms.ValidationError("Invalid parameter - target_type")
return self.cleaned_data.get("target_type")
| FuckBrains/VTuberSchedule | notify/forms.py | forms.py | py | 1,661 | python | en | code | 0 | github-code | 50 |
38817252633 | import json
from .models import *
def encode(object):
if isinstance(object, User):
return {
'id': object.id,
'password': object.password,
'email': object.email,
'phone_number': object.phone_number,
'date_creation': str(object.date_creation),
'is_active': object.is_active,
}
elif isinstance(object, Patient):
return {
'id': object.id,
'user': encode(object.user),
'first_name': object.first_name,
'last_name': object.last_name,
'middle_name': object.middle_name,
            'birth_date': str(object.birth_date),  # stringify for JSON, matching the Doctor branch
'snils': object.snils,
'policy': object.policy,
}
elif isinstance(object, Doctor):
return {
'id': object.id,
'name': object.name,
'surname': object.surname,
'patronymic': object.patronymic,
'birth_date': str(object.birth_date),
'speciality': encode(object.speciality),
            'department': encode(object.department),
            'services': [encode(service) for service in object.services.all()],
}
elif isinstance(object, Speciality):
return {
'id': object.id,
'name': object.name
}
elif isinstance(object, Department):
return {
'id': object.id,
'name': object.name
}
elif isinstance(object, Cabinet):
return {
'id': object.id,
'number': object.number,
'department': encode(object.department)
}
elif isinstance(object, Service):
return {
'id': object.id,
'category': encode(object.category),
'name': object.name,
'description': object.description,
'time': str(object.time)
}
elif isinstance(object, ServiceCategory):
return {
'id': object.id,
'name': object.name
}
elif isinstance(object, Price):
return {
'id': object.id,
'service': encode(object.service),
'cost': object.cost,
            'date_approval': str(object.date_approval),
}
elif isinstance(object, ReceptionPlan):
return {
'id': object.id,
'service': encode(object.service),
'doctor': encode(object.doctor),
'date': str(object.date)
}
elif isinstance(object, ReceptionLine):
return {
'id': object.id,
'reception_plan': encode(object.reception_plan),
'time': str(object.time)
}
elif isinstance(object, Register):
return {
'id': object.id,
'reception_line': encode(object.reception_line),
'patient': encode(object.patient)
}
else:
type_name = object.__class__.__name__
raise TypeError(f'Object of type "{type_name}" is not JSON serializable')
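def to_json(instance):
    """Added usage sketch (not part of the original module): serialize any
    supported model instance to a JSON string via the encoder above.
    default=str defensively stringifies any value encode leaves unconverted."""
    return json.dumps(encode(instance), default=str)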
| VOINTENT/hospital-backend | appointment/serializers.py | serializers.py | py | 3,094 | python | en | code | 0 | github-code | 50 |
42731261890 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import sys
import os
# This line is used for deploy to PYTHONPATH
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from KeyFinder.comm.logger import logger
from KeyFinder.workflow import walk_keyword, walk_replace
def get_args():
"""
    Parse the user's CLI input values; supports component-level options.
    :return: The parsed argument namespace.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--key", required=True, type=str, metavar="<Find Keywords>",
help="What keywords you want to walk.")
parser.add_argument("-p", "--path", required=True, type=str, metavar="<Walk Path>", help="Input walk path.")
parser.add_argument("-b", "--branch", type=str, metavar="<Git Branch>", help="Choose git repo branch.")
parser.add_argument("--replace", type=str, metavar="<True|False>", choices=["True", "False"], default=False,
help="A json file store replace content. Default: [False]")
args = parser.parse_args()
return args
def show_title_info(key, path, branch, replace):
logger.info("============================================ Task Begin =============================================")
logger.info("Keyword: %s, Path: %s, Branch: %s, Replace: %s" % (key, path, branch, replace))
logger.info("=====================================================================================================")
def show_end_info(key, path, branch, replace):
logger.info("============================================= Task End ==============================================")
logger.info("Keyword: %s, Path: %s, Branch: %s, Replace: %s" % (key, path, branch, replace))
logger.info("=====================================================================================================")
def change_directory(path):
logger.info("Changing work directory to: %s" % path)
try:
os.chdir(path)
except FileNotFoundError as err:
raise RuntimeError("Cannot change to path: %s with err: %s" % (path, err))
def perform_checkout_branch(branch):
logger.info("Performing checkout branch...")
if os.system("git checkout {}".format(branch)):
logger.error("Checkout branch failed, please check branch name!")
raise RuntimeError("Checkout branch failed, please check branch name!")
def perform_walk_keyword(path, keyword):
logger.info("Performing walk keyword...")
with walk_keyword.WalkKeyWorkflow(path, keyword) as walk_key:
walk_key.save_walk_data()
def perform_replace(path, keyword):
logger.info("Performing replace user data...")
with walk_replace.WalkReplaceWorkflow(path, keyword) as walk_rep:
walk_rep.update_walk_replace()
def main():
args = get_args()
# Remove duplicated items
keyword = list(set(args.key.split(",")))
path = args.path
branch = args.branch
    replace = args.replace == "True"  # argparse stores a string; compare explicitly so "False" stays falsy
show_title_info(keyword, path, branch, replace)
change_directory(path)
if branch:
perform_checkout_branch(branch)
perform_walk_keyword(path, keyword)
if replace:
perform_replace(path, keyword)
show_end_info(keyword, path, branch, replace)
if __name__ == '__main__':
main()
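# Example invocation (added for illustration; key list, path and branch are
# hypothetical):
#     python findKey.py -k password,token -p /path/to/repo -b master --replace True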
| caohuileon/KeyFinder | KeyFinder/findKey.py | findKey.py | py | 3,327 | python | en | code | 0 | github-code | 50 |
5999645446 | from django.urls import path
from .views import *
urlpatterns = [
# path('list', list_trainees, name='list_trainees'),
path('list', ListTrainees.as_view(), name='list_trainees'),
# path('add', add_trainee, name='add_trainee'),
path('add', AddTrainee.as_view(), name='add_trainee'),
path('update/<int:id>', update_trainee, name='update_trainee'),
path('delete/<int:id>', delete_trainee, name='delete_trainee'),
]
| AalaaBadr/ITI-Django-school-system | school/trainee/urls.py | urls.py | py | 438 | python | en | code | 0 | github-code | 50 |
75069063835 | from django.shortcuts import render
import datetime
from django.shortcuts import render,redirect
from rest_framework.decorators import api_view,permission_classes,authentication_classes
from rest_framework.permissions import IsAuthenticated
from user.models import Account,UserToken,Categories,District,City
from .models import Job_Detail,JobVerification,JobComplaint
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated,IsAdminUser
import random
from user.authentication import ADMINAuth, JWTAuthentications, create_access_token,create_refresh_token,decode_access_token,decode_refresh_token
from .serializer import CategorySerializer,CitySerializer,DistrictSerializer, JobHistorySerializer,JobSerializer,EditJobSerializer, JobVerificationSerializer,JobComplaintSerializer
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics
from rest_framework import viewsets
from django_filters import rest_framework as filters
from user.verify import send,check
from rest_framework import status
from django.core.mail import send_mail
# Create your views here.
# getting rent categories
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def rentcategories(request):
rent=Categories.objects.filter(category_of='rent')
serializer=CategorySerializer(rent,many=True)
return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def jobcategories(request):
rent=Categories.objects.filter(category_of='job')
serializer=CategorySerializer(rent,many=True)
return Response(serializer.data)
# showing all districts
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def getdistrict(request):
district=District.objects.all()
serializer=DistrictSerializer(district,many=True)
return Response(serializer.data)
# showing the cities for a given district
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def getcity(request,id):
district=District.objects.get(id=id)
city=City.objects.filter(district=district)
serializer=CitySerializer(city,many=True)
return Response(serializer.data)
# showing all cities
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def getallcity(request):
city=City.objects.all()
serializer=CitySerializer(city,many=True)
return Response(serializer.data)
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def jobpost(request):
data=request.data
user=request.user
mobiles=user.mobile
yr= int(datetime.date.today().strftime('%Y'))
dt= int(datetime.date.today().strftime('%d'))
mt= int(datetime.date.today().strftime('%m'))
d=datetime.date(yr,mt,dt)
current_date =d.strftime("%Y%m%d")
val=(random.randint(1, 99))
order_number=current_date +str(user.id)+str(val)
print(order_number)
job= Job_Detail.objects.create(
user=user,
mobile=mobiles,
district_id=data['district'],
city_id=data['city'],
title=data['title'],
category_id=data['category'],
discriptions=data['discription'],
sub_mobile=data['sub_mobile'],
place=data['place'],
address=data['address'],
rate=data['rate'],
slug=data['slug'],
available=True,
ordernumber=order_number,
valid_at =data['date'],
)
user.count+=1
user.save()
verify=JobVerification()
verify.mobile=mobiles
verify.order_number=order_number
verify.name=data['title']
verify.save()
serializer=JobSerializer(job,many=False)
return Response(serializer.data)
# all job view
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def getallpost(request):
now=datetime.datetime.now()
job=Job_Detail.objects.filter(payment='True',booked='False',available='True',valid_at__gte=now)
serializer=JobSerializer(job,many=True)
return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def singlejobview(request,id):
job=Job_Detail.objects.get(id=id)
serializer=JobSerializer(job,many=False)
return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def singlejobview_withBooked_person(request,id):
job=Job_Detail.objects.get(id=id)
serializer=JobSerializer(job,many=False)
return Response(serializer.data)
@api_view(['PUT'])
@authentication_classes([JWTAuthentications])
def editingjob(request,id):
try:
print('d111211')
job=Job_Detail.objects.get(id=id)
edit=EditJobSerializer(instance=job,data=request.data)
if edit.is_valid():
print('dfdf')
edit.save()
return Response(edit.data)
except:
response=Response()
response.data={
            'message':'could not update the job'
}
return response
# for completing the post and showing it on the posted surface
@api_view(['POST'])
# @authentication_classes([JWTAuthentications])
def paymentdone(request):
# try:
data=request.data
orderid=data['order_number']
print(orderid)
job=Job_Detail.objects.filter(ordernumber=orderid).exists()
print(job,'dfffd')
if not job:
response=Response()
response.data={
'error':'this item is not present '
}
return response
else:
job=Job_Detail.objects.get(ordernumber=orderid)
job.payment=True
job.save()
serializer=JobSerializer(job,many=False)
return Response(serializer.data)
# filter with category and place
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def showjob(request,id,cid):
try:
category=Categories.objects.get(id=id)
print(category)
city=City.objects.get(id=cid)
print(city)
job=Job_Detail.objects.filter(category=category,city=city,payment='True',booked='False',available='True')
serializer=JobSerializer(job,many=True)
return Response(serializer.data)
except:
response=Response()
response.data={
'error':'error in request'
}
return response
#filter with district
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def disctrict_job_show(request,id):
try:
district=District.objects.get(id=id)
print(district)
job=Job_Detail.objects.filter(district=district,payment='True',booked='False',available='True')
serializer=JobSerializer(job,many=True)
return Response(serializer.data)
except:
response=Response()
response.data={
'error':'error in request'
}
return response
# all data
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def all_job_show(request):
try:
district=District.objects.all()
print(district)
job=Job_Detail.objects.filter(payment='True',booked='False',available='True')
serializer=JobSerializer(job,many=True)
return Response(serializer.data)
except:
response=Response()
response.data={
'error':'error in request'
}
return response
class LargeResultsSetPagination(PageNumberPagination):
page_size = 2
page_size_query_param = 'page_size'
class BillingRecordsView(generics.ListAPIView):
authentication_classes = [JWTAuthentications]
queryset =Job_Detail.objects.filter(payment='True',booked='False',available='True')
serializer_class = JobSerializer
pagination_class = LargeResultsSetPagination
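# Added usage note: with page_size_query_param set above, a client can page
# through results and override the default page size per request, e.g.
#     GET <route-for-BillingRecordsView>?page=2&page_size=10
# (the actual route prefix depends on how this view is wired in urls.py).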
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def Givingjob_history(request):
user=request.user
job=Job_Detail.objects.filter(user__email=user)
serializer=JobHistorySerializer(job,many=True)
return Response(serializer.data)
class giving_job_edit(viewsets.ModelViewSet):
authentication_classes=[JWTAuthentications]
queryset=Job_Detail.objects.all()
serializer_class=JobSerializer
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def taking_job_history(request):
user=request.user
print(user,'kkkkkkk')
job=Job_Detail.objects.filter(booked_person__email=user)
serializer=JobHistorySerializer(job,many=True)
return Response(serializer.data)
# day for verify
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def Givingjob_verify_day(request):
user=request.user
now=datetime.datetime.now()
job=Job_Detail.objects.filter(user__email=user,valid_at=now)
serializer=JobHistorySerializer(job,many=True)
return Response(serializer.data)
# day for verify table
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def employee_verify_day(request):
user=request.user
now=datetime.datetime.now()
job=Job_Detail.objects.filter(booked_person__email=user,valid_at=now)
serializer=JobHistorySerializer(job,many=True)
return Response(serializer.data)
# day for verify
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def verify_data(request,number):
print(number,'number')
verify=JobVerification.objects.get(order_number=number)
serializer=JobVerificationSerializer(verify,many=False)
return Response(serializer.data)
# staring job verification
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def start_verify_data(request):
data=request.data
number=data['number']
print(number,'ithhh')
verify=JobVerification.objects.get(order_number=number)
mobile=verify.mobile
print(mobile,'mobilezzzz')
send(mobile)
verify.job_start=True
verify.start_otp=True
verify.save()
serializer=JobVerificationSerializer(verify,many=False)
return Response(serializer.data)
# staring job verification
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def start_verify_check(request):
data=request.data
number=data['number']
print(number,'uuuuupopp')
code=data['otp']
print(code,'jkjkjkj')
verify=JobVerification.objects.get(order_number=number)
mobile=verify.mobile
if check(mobile,code):
verify.start_verify=True
verify.save()
# send_mail( 'From WEDID ',
# f'congradulations !!! Your Rent service got purchased \n, {rent.title} service is takened by {request.user.first_name} ,\n you can contact +91{request.user.mobile} , \n thankyou ',
# 'wedidsolutions@gmail.com'
# ,[rent.user.email]
# ,fail_silently=False)
serializer=JobVerificationSerializer(verify,many=False)
return Response(serializer.data)
else:
message={'error':'otp is not valid'}
return Response(message,status=status.HTTP_400_BAD_REQUEST)
# staring job verification
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def end_verify_data(request):
data=request.data
number=data['number']
verify=JobVerification.objects.get(order_number=number)
mobile=verify.mobile
print(mobile,'mobile')
send(mobile)
verify.end_otp=True
verify.save()
serializer=JobVerificationSerializer(verify,many=False)
return Response(serializer.data)
# staring job verification
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def end_verify_check(request):
data=request.data
number=data['number']
code=data['otp']
verify=JobVerification.objects.get(order_number=number)
mobile=verify.mobile
if check(mobile,code):
verify.end_verify=True
verify.save()
job=Job_Detail.objects.get(ordernumber=number)
job.verified=True
job.save()
serializer=JobVerificationSerializer(verify,many=False)
return Response(serializer.data)
else:
message={'error':'otp is not valid'}
return Response(message,status=status.HTTP_400_BAD_REQUEST)
# total exp of service
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def total_giving_expense(request):
user=request.user
mobile=user.mobile
print(mobile)
job=JobVerification.objects.filter(mobile=mobile,job_end=True)
print(len(job),'lengthhh')
sum=0
for i in job:
print(i.order_number)
exps=Job_Detail.objects.filter(ordernumber=i.order_number).exists()
if exps:
exp=Job_Detail.objects.get(ordernumber=i.order_number)
print(exp,'expp')
print(exp.rate)
sum=sum+exp.rate
# serializer=JobVerificationSerializer(job,many=True)
return Response({'count':sum})
# total revenuew of service
@api_view(['POST'])
@authentication_classes([JWTAuthentications])
def total_revenue(request):
user=request.user
job=Job_Detail.objects.filter(booked_person__email=user)
sum=0
for i in job:
print(i.ordernumber)
reve= JobVerification.objects.filter(order_number= i.ordernumber).exists()
if reve:
revenue= JobVerification.objects.get(order_number= i.ordernumber)
if revenue.job_end:
sum=sum+i.rate
print(sum,'ddd')
return Response({'count':sum})
# total completed services
@api_view(['GET'])
@authentication_classes([JWTAuthentications])
def total_completed_task(request):
user=request.user
verify=Job_Detail.objects.filter(user=user,verified=True).exists()
if verify:
verify=Job_Detail.objects.filter(user=user,verified=True)
else:
ver=Job_Detail.objects.filter(booked_person__email=user,verifed=True).exits()
if ver:
verify=Job_Detail.objects.filter(booked_person__email=user,verifed=True)
serializer=JobSerializer(verify,many=True)
return Response(serializer.data)
@api_view(["POST"])
@authentication_classes([JWTAuthentications])
def job_complaint(request):
data=request.data
user=request.user
job=data['jobId']
typeId=Job_Detail.objects.filter(user=user).exists()
if typeId:
type='buyer'
else:
type='worker'
value=JobComplaint.objects.filter(job=job).exists()
if value:
if type=='buyer':
verify=JobComplaint.objects.filter(job=job,buyer=True).exists()
if verify:
message={'error':'you are already submitted the complaint'}
return Response(message,status=status.HTTP_400_BAD_REQUEST)
else:
comp=JobComplaint.objects.create(
user=user,
job_id=job,
complaint=data['complaint'],
buyer=True,
)
serializer=JobComplaintSerializer(comp,many=False)
return Response(serializer.data)
else:
verify=JobComplaint.objects.filter(job=job,buyer=False).exists()
if verify:
message={'error':'you are already submitted the complaint'}
return Response(message,status=status.HTTP_400_BAD_REQUEST)
else:
comp=JobComplaint.objects.create(
user=user,
job_id=job,
complaint=data['complaint'],
buyer=False,
)
serializer=JobComplaintSerializer(comp,many=False)
return Response(serializer.data)
else:
if type=='buyer':
verify=JobComplaint.objects.filter(user=user,buyer=True).exists()
comp=JobComplaint.objects.create(
user=user,
job_id=job,
complaint=data['complaint'],
buyer=True,
)
serializer=JobComplaintSerializer(comp,many=False)
return Response(serializer.data)
else:
comp=JobComplaint.objects.create(
user=user,
job_id=job,
complaint=data['complaint'],
buyer=False,
)
serializer=JobComplaintSerializer(comp,many=False)
return Response(serializer.data)
| imviz/WEDID | weDid/jobportal/views.py | views.py | py | 16,549 | python | en | code | 4 | github-code | 50 |
11980639871 | import constants as c
import subprocess
import hashlib
import logging
import tarfile
import sys
from SRA_submission_tool.submission_db import SubmissionDBService
__author__ = "Amr Abouelleil"
class BamValidator(object):
"""
A class for creating bam validator objects that run ValidateSamFile.
"""
def __init__(self):
self.logger = logging.getLogger('sra_tool.bam_service.BamValidator')
self.dbs = SubmissionDBService(c.submission_db)
def validate_bam(self, bam_file, spuid):
"""
A method that runs ValidateSamFile to check bam files for errors.
:param bam_file:
:return: A dict with stdout and stderr of ValidateSamFile which can be analyzed to determine errors if any.
"""
self.dbs.update_sub_data(spuid, column_id="submission_status", column_value="validating")
cmd = "java -jar " + c.picard_validate_path + " I=" + bam_file + " IGNORE=INVALID_VERSION_NUMBER"
self.logger.info("validate bam : " + cmd)
try:
proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
if 'ERROR' in out.split('\n')[0]:
self.logger.error("bam validation errors found: " + str(out))
self.dbs.update_sub_data(spuid, column_id="submission_status",
column_value="not validated")
return False
else:
self.dbs.update_sub_data(spuid, column_id="submission_status", column_value="validated")
self.logger.info("No bam validation errors found.")
return True
        except Exception:
self.dbs.update_sub_data(spuid, column_id="submission_status", column_value="validation error")
class ChecksumCreator(object):
"""
A class for checksum creator objects using the md5 algorithm.
"""
def __init__(self, target_file):
self.target_file = target_file
def create_checksum(self):
"""
A method for creating a checksum of any file.
:param
:return: checksum string for file
"""
file_handle = open(self.target_file, 'rb')
checksum = hashlib.md5(file_handle.read()).hexdigest()
file_handle.close()
return checksum
def write_checksum(self, checksum):
"""
A function for writing a checksum for a file to another file with appropriate naming.
:param checksum: the md5 checksum as calculated by hashlib.
:return: name of the output file
"""
out_file_name = self.target_file + ".md5"
file_handle = open(out_file_name, "wb")
file_handle.write(checksum)
file_handle.close()
return out_file_name
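# Minimal usage sketch (added; 'reads.bam' is a hypothetical file path):
#     creator = ChecksumCreator('reads.bam')
#     md5_path = creator.write_checksum(creator.create_checksum())
#     # md5_path == 'reads.bam.md5'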
class BamParser(object):
"""
A class for parsing bam files using samtools. Currently just parses bam header into a dictionary.
"""
def __init__(self):
self.logger = logging.getLogger('sra_tool.file_service.BamParser')
def list_to_dict(self, delim, in_list):
"""
A simple list to dictionary converter.
        :param delim: The delimiter that separates keys and values in each list element.
:param in_list: The input list.
:return:
"""
new_dict = dict()
for element in in_list:
if ":" in element:
new_dict[element.split(delim)[0]] = element.split(delim)[1]
self.logger.debug("List converted to dict:" + str(new_dict))
return new_dict
def parse_header(self, bam_file):
"""
A wrapper method that calls samtools to parse the header information.
:param bam_file: The bam file to parse
:return: A dictionary of bam header information.
"""
cmd = c.samtools_path + " view -H " + bam_file
self.logger.debug(msg="Header Parse cmd:" + cmd)
proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
stdout, stderr = proc.communicate()
read_group_string = stdout[stdout.find("@RG")+1:stdout.find("@PG")]
seq_string = stdout[stdout.find("@SQ")+1:stdout.find("@RG")]
header_dict = self.list_to_dict(":", read_group_string.split())
seq_dict = self.list_to_dict(":", seq_string.split())
header_dict.update(seq_dict)
self.logger.debug(msg="Header dict:" + str(header_dict))
return header_dict
class PacBioService(object):
"""
A service for operations dealing with PacBio data
"""
def __init__(self):
self.logger = logging.getLogger('sra_tool.file_service.PacBioService')
def get_pacbio_files_data(self, h5_file):
"""
A method that returns a list of info regarding pacbio files
:param h5_file: the pacbio bas.h5 read file
:return: A dictionary containing the location of analysis files as well as the root name of the pacbio read file.
"""
analysis_dir = "/".join(h5_file.split('/')[0:-1]) + "/"
if "bas" in h5_file:
root_name = h5_file.split('/')[-1].replace('.bas.h5', '')
self.logger.info("Root name set to" + root_name)
else:
self.logger.critical("Unrecognized PacBio file type:" + h5_file)
sys.exit(1)
file_list = [h5_file, analysis_dir + root_name + ".1.bax.h5",
analysis_dir + root_name + ".2.bax.h5", analysis_dir + root_name + ".3.bax.h5"]
file_data = {'analysis_dir': analysis_dir, 'root_name': root_name, 'file_list': file_list}
self.logger.debug(msg="File Data retrieved:" + str(file_data))
return file_data
def archive_maker(archive_dest, file_list):
"""
A function for making several files into a tar.gz file.
:param archive_dest: The destination in which the archive file should go.
:param file_list: List of files to archive.
:return:
"""
logger = logging.getLogger('sra_tool.file_service.archive_maker')
logger.debug("Archive Destination:" + archive_dest)
logger.debug("Archive File List:" + str(file_list))
archive = tarfile.open(archive_dest, "w:gz")
for name in file_list:
archive.add(name)
archive.close()
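# Usage sketch (added; file names are illustrative):
#     archive_maker('/tmp/run.tar.gz', ['a.bas.h5', 'a.1.bax.h5'])
# packs both files into a single gzipped tar at /tmp/run.tar.gz.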
def rehead_bam(in_bam, bam_file, temp_dir, header):
logger = logging.getLogger('sra_tool.file_service.rehead_bam')
sam_file = temp_dir + "/temp.sam"
sam_cmd = c.samtools_path + " view -h -o " + sam_file + " " + in_bam
logger.info("Running " + sam_cmd)
subprocess.check_call(sam_cmd, stdout=subprocess.PIPE, shell=True)
reheaded_bam_path = temp_dir + "/" + header + ".reheaded.screened.bam"
rehead_cmd = c.samtools_path + " reheader " + sam_file + " " + bam_file + " > " + reheaded_bam_path
logger.info("Running " + rehead_cmd)
subprocess.check_call(rehead_cmd, stdout=subprocess.PIPE, shell=True)
return reheaded_bam_path
def unalign_bam(bam_file, out_bam):
logger = logging.getLogger('sra_tool.file_service.unalign_bam')
cmd = "java -jar " + c.picard_revert_sam + " I=" + bam_file + " O=" + out_bam
subprocess.check_call([cmd], stdout=subprocess.PIPE, shell=True)
logger.info("Running " + cmd)
return out_bam
| broadinstitute/sra_submission_tool | SRA_submission_tool/file_service.py | file_service.py | py | 7,361 | python | en | code | 0 | github-code | 50 |
635341525 | from django import forms
from .models import Posts, Profile, Comments, Likes
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user']
fields = ['dp','bio', 'phone_number']
class PostsForm(forms.ModelForm):
class Meta:
model = Posts
exclude = ['user']
fields = ['name','description', 'link','image1','image2','image3']
class CommentsForm(forms.ModelForm):
class Meta:
model=Comments
exclude=[]
fields=['comment']
rating_choices = [
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10'),
]
class VotesForm(forms.Form):
design = forms.CharField(label='Design level', widget=forms.RadioSelect(choices=rating_choices))
usability = forms.CharField(label='Usability level', widget=forms.RadioSelect(choices=rating_choices))
creativity = forms.CharField(label='Creativity level', widget=forms.RadioSelect(choices=rating_choices))
content = forms.CharField(label='Content level', widget=forms.RadioSelect(choices=rating_choices)) | Paul-Ngigi/alaaaa | core/forms.py | forms.py | py | 1,142 | python | en | code | 0 | github-code | 50 |
71356141595 | from bayessb.report import reporter, Result, ThumbnailResult
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from bayessb.multichain import NoPositionsException
import numpy as np
import math
reporter_group_name = "Residuals"
@reporter('Residuals at Maximum Likelihood')
def residuals_at_max_likelihood(mcmc_set):
# Get the maximum likelihood parameters
try:
(max_likelihood, max_likelihood_position) = mcmc_set.maximum_likelihood()
except NoPositionsException as npe:
return Result(None, None)
# Get the residuals
residuals = mcmc_set.chains[0].get_residuals(max_likelihood_position)
# Make the residuals plot
fig = Figure()
ax = fig.gca()
plot_filename = '%s_max_likelihood_residuals.png' % mcmc_set.name
thumbnail_filename = '%s_max_likelihood_residuals_th.png' % mcmc_set.name
ax.plot(residuals[0], residuals[1])
ax.set_title('Residuals at Maximum Likelihood')
#ax.xlabel('Time')
#ax.ylabel('Residual')
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(plot_filename)
fig.savefig(thumbnail_filename, dpi=10)
return ThumbnailResult(thumbnail_filename, plot_filename)
@reporter('ACF of ML Residuals')
def acf_of_ml_residuals(mcmc_set):
# Get the maximum likelihood parameters
try:
(max_likelihood, max_likelihood_position) = mcmc_set.maximum_likelihood()
except NoPositionsException as npe:
return Result(None, None)
# Get the residuals
residuals = mcmc_set.chains[0].get_residuals(max_likelihood_position)
# Plot the autocorrelation function
acf = np.correlate(residuals[1], residuals[1], mode='full')
plot_filename = '%s_acf_of_ml_residuals.png' % mcmc_set.name
thumbnail_filename = '%s_acf_of_ml_residuals_th.png' % mcmc_set.name
fig = Figure()
ax = fig.gca()
ax.plot(acf)
ax.set_title('Autocorrelation of Maximum Likelihood Residuals')
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(plot_filename)
fig.savefig(thumbnail_filename, dpi=10)
return ThumbnailResult(thumbnail_filename, plot_filename)
@reporter('Durbin-Watson Statistic of ML Residuals')
def durbin_watson_of_ml_residuals(mcmc_set):
# Get the maximum likelihood parameters
try:
(max_likelihood, max_likelihood_position) = mcmc_set.maximum_likelihood()
except NoPositionsException as npe:
return Result(None, None)
# Get the residuals
residuals = mcmc_set.chains[0].get_residuals(max_likelihood_position)
e = residuals[1]
# Calculate Durbin-Watson
d = float(np.sum([math.pow(e[i+1] - e[i], 2) for i in range(len(e)-1)])) / \
float(np.sum([math.pow(i, 2) for i in e]))
return Result(d, None, expectation=2.0)
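# Worked example (added for illustration): for residuals e = [1, -1, 1, -1],
# the numerator is (-2)**2 + 2**2 + (-2)**2 = 12 and the denominator is
# 1 + 1 + 1 + 1 = 4, so d = 3.0 -- well above the no-autocorrelation
# expectation of 2.0, indicating negative autocorrelation in the residuals.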
| johnbachman/tBidBaxLipo | tbidbaxlipo/reporters/residuals.py | residuals.py | py | 2,812 | python | en | code | 0 | github-code | 50 |
28570918893 | from collections import defaultdict
from enum import Enum
import functools
from itertools import product
from lark import (
Lark,
Transformer,
v_args,
)
from typing import (
Optional,
Sequence,
Union,
)
class WhereOp(Enum):
AND = 0
OR = 1
@functools.total_ordering
class WhereLiteral:
"""
Represents a literal in query selection.
A literal is of form `attribute==value` or `attribute<>value`.
"""
def __init__(self, attribute: str, eq_value: str, negated: bool = False):
"""
:param attribute: attribute to be filtered by.
:param eq_value: the value to compare attribute to.
:param negated: If `True`, check for inequality else equality.
"""
self.attr = attribute
self.val = eq_value
self.neg = negated
def __repr__(self):
cmp = " != " if self.neg else " = "
return "Literal(" + self.attr + cmp + '"' + self.val + '")'
def __str__(self):
cmp = "!=" if self.neg else "="
return self.attr + cmp + '"' + self.val + '"'
def __eq__(self, other):
return (
self.attr == other.attr
and self.val == other.val
and self.neg == other.neg
)
def __lt__(self, other):
return (
(self.attr, self.val, self.neg) <
(other.attr, other.val, other.neg)
)
def __hash__(self):
return hash((self.attr, self.val, self.neg))
def eval(self, tuple_: dict) -> bool:
"""Evaluates if the tuples fulfils the literal."""
if self.neg:
return tuple_[self.attr] != self.val
else:
pass
return tuple_[self.attr] == self.val
@functools.total_ordering
class Where:
"""Represents the filter (WHERE clause) of a query."""
def __init__(self, parts: Sequence[Union['Where', WhereLiteral]],
op: WhereOp = WhereOp.AND, is_canonical: bool = False,
or_exclusive: Optional[bool] = None):
"""
:param parts: A sequence of `Where`s and literals.
:param op: Choose disjunction (or) or conjunction (and).
:param is_canonical: Mainly for internal use: is the filter in DNF?
:param or_exclusive: Mainly for internal use: if all subpopulations
selected by the filter are pairwise disjoint.
"""
self._parts = parts
self._op = op
self._or_exclusive = or_exclusive
self.is_canonical = is_canonical
@property
def parts(self):
return self._parts
@property
def op(self):
return self._op
def __eq__(self, other):
return set(self.parts) == set(other.parts) and self.op == other.op
def __lt__(self, other):
return (
(self.op, sorted(self.parts)) < (other.op, sorted(other.parts))
)
def __hash__(self):
return hash((*self.parts, self.op))
def __repr__(self):
return ("WHERE_" + self.op.name + "(" +
", ".join(map(lambda p: p.__repr__(), self.parts)) +
")")
def __str__(self):
return '(' + (' ' + self.op.name + ' ').join(map(str, self.parts)) + ')'
def is_and_clause(self) -> bool:
"""Checks if the filter is an and clause (conjunction of literals)."""
return (self.op == WhereOp.AND
and all(map(lambda p: isinstance(p, WhereLiteral),
self.parts))
and all(self.parts[i] < self.parts[i + 1]
for i in range(len(self.parts) - 1)))
def is_wrapped_and_clause(self) -> bool:
"""
        Checks if the filter is a wrapped and clause,
        i.e. a conjunction of literals wrapped in an otherwise empty OR `Where`.
"""
return (self.op == WhereOp.OR
and len(self.parts) == 1
and isinstance(self.parts[0], Where)
and self.parts[0].is_and_clause())
def _simplify_and_clause(self, clause: "Where") -> Union["Where", bool]:
"""
Returns a simplified, equivalent and clause.
Simplifications include:
- remove duplicate literals
- remove negated literals if un-negated exist for the same attribute
- check for some unsatisfiabilities and tautologies.
        :param clause: the clause to be simplified.
        :returns: the simplified clause (not necessarily a new `Where`),
            `False` if it's obviously unsatisfiable, or
            `True` if it's an obvious tautology.
"""
assert clause.is_and_clause()
c_parts = set(clause.parts)
assert len(c_parts) == len(clause.parts)
if len(c_parts) == len({p.attr for p in c_parts}):
# all attributes are unique
return clause
new_parts = set()
pos_values = {}
neg_values = defaultdict(set)
for p in (p for p in c_parts if not p.neg):
# process positive matches first as they are more restrictive
if p.attr in pos_values:
val = pos_values[p.attr]
if p.val == val:
continue # redundant literal
else:
return False # unsatisfiable: attr=val1 AND attr=val2
pos_values[p.attr] = p.val
new_parts.add(p)
for p in (p for p in c_parts if p.neg):
if p.attr in pos_values:
val = pos_values[p.attr]
if p.val == val:
return False # unsatisfiable: attr=val1 AND attr!=val1
else:
continue # over-specified: attr=val1 AND attr!=val2
if neg_values[p.attr]:
values = neg_values[p.attr]
if p.val in values:
continue # redundant literal
else:
pass # fine: attr!=val1 AND attr!=val2
# raise NotImplemented(
# "cannot handle multiple negated literals for the "
# "same attribute e.g. attr != val1 AND attr != val2"
# )
neg_values[p.attr].add(p.val)
new_parts.add(p)
assert len(set(new_parts)) == len(new_parts)
return Where(parts=tuple(sorted(new_parts)), op=WhereOp.AND)
def canonical(self) -> Union["Where", bool]:
"""
Turns the filter into an equivalent filter in DNF.
*This function has side-effects!* It can alter this instance.
:returns: an equivalent filter in disjunctive normal form (DNF),
`False` if it's obviously unsatisfiable, or
`True` if it's obviously a tautology.
"""
if self.is_canonical:
return self
literals = [p for p in self.parts if isinstance(p, WhereLiteral)]
clauses = [p.canonical() for p in self.parts if isinstance(p, Where)]
if self.op == WhereOp.AND:
if any(c is False for c in clauses):
# whole formula is unsatisfiable
return False
else:
if any(c is True for c in clauses):
# whole formula is tautological
return True
clauses = [c for c in clauses if not isinstance(c, bool)]
clauses = list(set(c for c in clauses
if isinstance(c, bool) or c.parts))
other_op_clauses = []
for clause in clauses:
if clause.op != self.op:
other_op_clauses.append(clause)
else:
for p in clause.parts:
if isinstance(p, WhereLiteral):
literals.append(p)
else:
assert isinstance(p, Where)
assert p.op != self.op
other_op_clauses.append(p)
if not other_op_clauses:
if self.op == WhereOp.OR:
ands = map(lambda l: Where(parts=(l,), op=WhereOp.AND),
sorted(set(literals)))
return Where(parts=tuple(ands), op=self.op, is_canonical=True,
or_exclusive=self._or_exclusive)
else:
and_ = Where(parts=tuple(sorted(set(literals))), op=self.op)
and_s = self._simplify_and_clause(and_)
if and_s is False:
# print("Dropping unsatisfiable clause: %s" % repr(and_))
return False
return Where(parts=(and_s,), op=WhereOp.OR, is_canonical=True,
or_exclusive=self._or_exclusive)
if self.op == WhereOp.OR:
assert all(
all(
isinstance(p, WhereLiteral) for p in c.parts
) for c in other_op_clauses
)
literal_clauses = map(lambda l: Where(parts=(l,), op=WhereOp.AND),
set(literals))
parts = sorted({*literal_clauses, *other_op_clauses})
return Where(parts=tuple(parts), op=WhereOp.OR, is_canonical=True,
or_exclusive=self._or_exclusive)
else:
new_and_clauses = []
seen_combinations = set((l,) for l in literals)
if len(other_op_clauses) <= 1:
part_combinations = [(p,) for c in other_op_clauses
for p in c.parts]
else:
part_combinations = product(*map(lambda c: c.parts,
other_op_clauses))
for com in part_combinations:
inner_literals = [
l for p in
(list(c.parts) if isinstance(c, Where) else [c]
for c in com)
for l in p
]
parts = tuple(sorted(set(inner_literals + literals)))
if parts not in seen_combinations:
seen_combinations.add(parts)
new_and_clauses.append(Where(parts=parts, op=WhereOp.AND))
new_and_clauses_filtered = set()
for c in new_and_clauses:
c = self._simplify_and_clause(c)
if c:
new_and_clauses_filtered.add(c)
# else:
# print("Dropping unsatisfiable clause: %s" % repr(c))
if not new_and_clauses_filtered:
return False
new_parts = tuple(sorted(new_and_clauses_filtered))
return Where(parts=new_parts, op=WhereOp.OR, is_canonical=True,
or_exclusive=self._or_exclusive)
def or_exclusive(self) -> bool:
"""
Checks if all selected subpopulations are pairwise disjoint.
        :raises ValueError: if this filter is not canonical.
"""
if self._or_exclusive is not None:
return self._or_exclusive
if not self.is_canonical:
raise ValueError("Can only check or_exclusive() for canonical "
"queries.")
for i in range(len(self.parts)):
            i_j_disjoint = True  # stays True when the inner loop is empty (i is the last index)
for j in range(i + 1, len(self.parts)):
i_j_disjoint = False
for lit_i in self.parts[i].parts:
for lit_j in self.parts[j].parts:
if (lit_i.attr == lit_j.attr
and ((lit_i.val != lit_j.val)
^ (lit_i.neg != lit_j.neg))):
i_j_disjoint = True
break
if i_j_disjoint:
break
if i_j_disjoint:
break
if not i_j_disjoint:
self._or_exclusive = False
return False
self._or_exclusive = True
return True
def eval(self, tuple_: dict) -> bool:
"""Checks if the given tuple satisfies the selection."""
reduce = all if self._op == WhereOp.AND else any
return reduce(map(lambda x: x.eval(tuple_), self._parts))
WHERE_GRAMMAR = r"""
%import common.CNAME -> NAME
%import common.NUMBER
%import common.WS
start: or_clause -> start
or_clause: and_clause -> id
| and_clause (_OR and_clause)+ -> or_
and_clause: atom -> id
| atom (_AND atom)+ -> and_
atom: NAME _EQUAL value -> eq
| NAME _NOT_EQUAL value -> neq
| "(" or_clause ")" -> id
value: QUOTED_STR -> quoted_val
| NAME -> id
| NUMBER -> id
QUOTED_STR: /\".*\"(?<!\\\")|\'.*\'(?<!\\\')/
_OR: /(?<=\s)OR(?=\s)/
_AND: /(?<=\s)AND(?=\s)/
_EQUAL: "=="
| "="
_NOT_EQUAL: "!="
| "<>"
%ignore WS
"""
@v_args(inline=True)
class _T(Transformer):
def start(self, clause):
if not isinstance(clause, Where):
return Where(parts=(clause,))
return clause
def or_(self, *clauses):
# print('or', clauses)
return Where(parts=clauses, op=WhereOp.OR)
# raise NotImplementedError('WHERE currently only supports AND')
def and_(self, *clauses):
# print('and', clauses)
return Where(parts=clauses, op=WhereOp.AND)
def eq(self, name, value):
# print('eq', name, value)
return WhereLiteral(attribute=str(name), eq_value=str(value))
def neq(self, name, value):
# print('neq', name, value)
return WhereLiteral(attribute=str(name), eq_value=str(value),
negated=True)
def quoted_val(self, val):
return str(val)[1:-1]
def id(self, val):
return val
parse_where = Lark(WHERE_GRAMMAR, parser='lalr', transformer=_T()).parse
class Query:
"""Represents a whole query (selection and grouped attributes)."""
def __init__(self, groups: Sequence[str], where: Union[bool, Where] = True):
"""
:param groups: the attributes to group by.
:param where: the selection. Use `True` to indicate no selection.
"""
assert groups
self.groups = groups
self.where = where
self.is_canonical = False
def __hash__(self):
return hash((*self.groups, self.where))
def __eq__(self, other: "Query") -> bool:
return (set(self.groups) == set(other.groups)
and self.where == other.where)
def canonical(self) -> "Query":
"""Turn the query into canonical form: sorted groups & where in DNF."""
if self.is_canonical:
return self
if isinstance(self.where, bool):
q = Query(sorted(self.groups), self.where)
else:
q = Query(sorted(self.groups), self.where.canonical())
q.is_canonical = True
return q
def __str__(self):
return "GROUP BY {} WHERE {}".format(
", ".join(self.groups), str(self.where).lower()
)
class QueryRewriteEngine:
# TODO: out of scope
@staticmethod
def optimize(q: Query, result_cache: 'ResultCache') -> Query:
pass
if __name__ == '__main__':
parser = Lark(WHERE_GRAMMAR, parser='lalr', transformer=_T())
res = parser.parse("a=a AND b=b AND c=c OR d=d OR e=e")
res2 = parser.parse("name=\"test est\" AND (a=b OR c==d)")
res3 = Where(parts=(res, res2))
print(res3)
print(res3.canonical())
print(parser.parse("(a=b OR c=a) AND d=d"))
res4 = parser.parse("a=a")
print(res4)
print(res4.canonical().__repr__(), '<-------------')
print(parser.parse("a=a AND b=b OR c=c").canonical().__repr__())
print(parser.parse("a=a OR b=b AND c=c"))
print(Lark(WHERE_GRAMMAR, parser='lalr').parse("a=a AND b=b OR c=c"))
print(Lark(WHERE_GRAMMAR, parser='lalr').parse("a=a OR b=b AND c=c"))
w = Where(
parts=(
WhereLiteral('a', 'a'),
WhereLiteral('b', 'a'),
),
op=WhereOp.AND
)
w = Where(parts=(w,), op=WhereOp.OR)
print(repr(w))
print(repr(w.canonical()))
w2 = Where(parts=(w, WhereLiteral('c', 'c')), op=WhereOp.AND)
print(repr(w2))
print(repr(w2.canonical()))
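    # Additional sketch: or_exclusive() on a canonical filter whose disjuncts
    # select disjoint subpopulations (a=1 and a=2 cannot both hold).
    w3 = parse_where("a=1 OR a=2").canonical()
    print(w3.or_exclusive())  # expected: True
    # eval() tests a single tuple against the filter, e.g. (assuming
    # WhereLiteral.eval compares tuple_[attr] to its stored value):
    # print(w3.eval({'a': '1'}))  # expected: True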
| robsdedude/proggers | proggers/query.py | query.py | py | 16,544 | python | en | code | 1 | github-code | 50 |
72454025116 | from typing import List
class Solution:
def numberOfLines(self, widths: List[int], S: str) -> List[int]:
ABC = "abcdefghijklmnopqrstuvwxyz"
counter, lines = 0, 1
_dict = {}
for i in range(len(widths)):
_dict[ABC[i]] = widths[i]
for i in S:
counter += _dict[i]
if counter > 100:
lines += 1
counter = _dict[i]
return [lines, counter] | Mayureshd-18/test2 | my-folder/problems/number_of_lines_to_write_string/solution.py | solution.py | py | 462 | python | en | code | 0 | github-code | 50 |
18197122309 | import math
import tkinter as tk
from PIL import Image,ImageTk
janela = tk.Tk()
janela.geometry("680x780")
janela.title("Calculador de Equação do Segundo Grau")
a = tk.Label(text="Informe o a:")
a.grid(column=0,row=1)
b = tk.Label(text="Informe o b:")
b.grid(column=0,row=2)
c = tk.Label(text="Informe o c:")
c.grid(column=0,row=3)
aEntrada = tk.Entry()
aEntrada.grid(column=1,row=1)
bEntrada = tk.Entry()
bEntrada.grid(column=1,row=2)
cEntrada = tk.Entry()
cEntrada.grid(column=1,row=3)
def getEntrada():
dados = EquacaoSegundoGrau(float(aEntrada.get()),float(bEntrada.get()),float(cEntrada.get()))
textArea = tk.Text(master=janela,height=10,width=25)
textArea.grid(column=1,row=6)
resposta = dados.delta()
textArea.insert(tk.END,resposta)
botao = tk.Button(janela,text='Calcular Eq do segundo grau',command=getEntrada,bg='red')
botao.grid(column=1,row=6)
class EquacaoSegundoGrau:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
    def delta(self):
        if self.a == 0:
            return 'Não é equação do segundo grau'
        # use a local variable: assigning to self.delta would overwrite this method
        delta = math.pow(self.b, 2) - 4 * self.a * self.c
        if delta < 0:
            return 'Não há raízes'
        elif delta == 0:
            # single root: x = -b / (2a)
            return f'Só há uma raiz {"%.2f" % (-self.b / (2 * self.a))}'
        else:
            # two roots: x = (-b ± sqrt(delta)) / (2a)
            x1 = (-self.b + math.sqrt(delta)) / (2 * self.a)
            x2 = (-self.b - math.sqrt(delta)) / (2 * self.a)
            return f'Há duas raízes {"%.2f" % x1},{"%.2f" % x2}'
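# Worked example (sketch): for a=1, b=-3, c=2 the discriminant is
# delta = (-3)**2 - 4*1*2 = 1 > 0, so there are two roots
# x = (3 ± 1) / 2, i.e. 2.00 and 1.00.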
foto = Image.open("Captura de tela em 2022-11-30 09-54-56.png")
foto.thumbnail((300,300),Image.ANTIALIAS)
photo=ImageTk.PhotoImage(foto)
foto_label = tk.Label(image=photo)
foto_label.grid(column=1,row=0)
janela.mainloop()
| Heber3000/Calculadora_de_equa-o_do_segundo_grau | projeto.py | projeto.py | py | 1,825 | python | pt | code | 0 | github-code | 50 |
15823622286 | import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.renderers.default='browser'
from sklearn.metrics import roc_auc_score
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
# read data train
train = pd.read_csv(
"C://Users//aigerimb//Desktop//riid_prediction_competition//train.csv",
usecols=[1, 2, 3, 4, 5, 7, 8, 9],
dtype={
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'answered_correctly':'int8',
'prior_question_elapsed_time': 'float32',
'prior_question_had_explanation': 'boolean'
}
)
questions = pd.read_csv(
'C://Users//aigerimb//Desktop//riid_prediction_competition//questions.csv',
usecols=[0, 3],
dtype={
'question_id': 'int16',
'part': 'int8'}
)
lectures = pd.read_csv('C://Users//aigerimb//Desktop//riid_prediction_competition//lectures.csv')
# replace strings
lectures['type_of'].replace('solving question', 'solving_question', inplace=True)
# feature generation from lectures.csv
#convert categorical variables to one-hot encoding
lectures = pd.get_dummies(lectures, columns=['part', 'type_of'])
lectures_part_col = [cols for cols in lectures.columns if cols.startswith('part')]
lectures_type_of_col = [cols for cols in lectures.columns if cols.startswith('type_of')]
train_lectures = train[train['content_type_id']==1].merge(lectures, how='left', left_on='content_id', right_on='lecture_id')
# how many times student watched different lectures by part and type
user_stats_per_lecture = train_lectures.groupby('user_id')[lectures_part_col+lectures_type_of_col].sum()
del(train_lectures)
# drop = True to avoid adding the old index
train = train[train.content_type_id == False].sort_values('timestamp').reset_index(drop = True)
train[(train['content_type_id']==0)]['task_container_id'].nunique()  # inspection only: the result is not stored
elapsed_mean = train.prior_question_elapsed_time.mean()
# how many times in average user answerred correctly
results_u_final = train.loc[(train['content_type_id']==0, ['user_id', 'answered_correctly'])].groupby(['user_id']).agg(['mean'])
results_u_final.columns = ['answerred_correctly_user']
results_u3_final = train.loc[(train['content_type_id']==0, ['user_id', 'answered_correctly'])].groupby(['user_id']).agg(['count'])
results_u3_final.columns = ['total_q_user']
# average of explantions for questions per user
results_u2_final = train.loc[train.content_type_id == False, ['user_id','prior_question_had_explanation']].groupby(['user_id']).agg(['mean'])
results_u2_final.columns = ['explanation_mean_user']
train = pd.merge(train, questions, left_on = 'content_id', right_on = 'question_id', how = 'left')
results_q_final = train.loc[train.content_type_id == False, ['question_id','answered_correctly']].groupby(['question_id']).agg(['mean'])
results_q_final.columns = ['quest_pct']
question2 = pd.merge(questions, results_q_final, left_on = 'question_id', right_on = 'question_id', how = 'left')
prior_mean_user = results_u2_final.explanation_mean_user.mean()
train.drop(['timestamp', 'content_type_id', 'question_id', 'part'], axis=1, inplace=True)
# creating validation set:
validation = train.groupby('user_id').tail(5)
train = train[~train.index.isin(validation.index)]
len(train) + len(validation)
results_u_val = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['mean'])
results_u_val.columns = ['answered_correctly_user']
results_u2_val = train[['user_id','prior_question_had_explanation']].groupby(['user_id']).agg(['mean'])
results_u2_val.columns = ['explanation_mean_user']
results_u3_val = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['count'])
results_u3_val.columns = ['total_q_user']
X = train.groupby('user_id').tail(18)
train = train[~train.index.isin(X.index)]
results_u_X = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['mean'])
results_u_X.columns = ['answered_correctly_user']
results_u2_X = train[['user_id','prior_question_had_explanation']].groupby(['user_id']).agg(['mean'])
results_u2_X.columns = ['explanation_mean_user']
results_u3_X = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['count'])
results_u3_X.columns = ['total_q_user']
#clearing memory
del(train)
#X = pd.merge(X, group3, left_on=['task_container_id'], right_index= True, how="left")
X = pd.merge(X, results_u_X, on=['user_id'], how="left")
X = pd.merge(X, results_u2_X, on=['user_id'], how="left")
X = pd.merge(X, results_u3_X, on=['user_id'], how="left")
X = pd.merge(X, user_stats_per_lecture, on=['user_id'], how="left")
#validation = pd.merge(validation, group3, left_on=['task_container_id'], right_index= True, how="left")
validation = pd.merge(validation, results_u_val, on=['user_id'], how="left")
validation = pd.merge(validation, results_u2_val, on=['user_id'], how="left")
validation = pd.merge(validation, results_u3_val, on=['user_id'], how="left")
validation = pd.merge(validation, user_stats_per_lecture, on=['user_id'], how="left")
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
X.prior_question_had_explanation.fillna(False, inplace = True)
validation.prior_question_had_explanation.fillna(False, inplace = True)
validation["prior_question_had_explanation_enc"] = lb_make.fit_transform(validation["prior_question_had_explanation"])
X["prior_question_had_explanation_enc"] = lb_make.fit_transform(X["prior_question_had_explanation"])
content_mean = question2.quest_pct.mean()
question2.quest_pct.mean()
X = pd.merge(X, question2, left_on = 'content_id', right_on = 'question_id', how = 'left')
validation = pd.merge(validation, question2, left_on = 'content_id', right_on = 'question_id', how = 'left')
X.part = X.part - 1
validation.part = validation.part - 1
y = X['answered_correctly']
X = X.drop(['answered_correctly'], axis=1)
X.head()
y_val = validation['answered_correctly']
X_val = validation.drop(['answered_correctly'], axis=1)
X = X[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'total_q_user',
'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part',
'part_1', 'part_2', 'part_3', 'part_4', 'part_5', 'part_6', 'part_7',
'type_of_concept', 'type_of_intention', 'type_of_solving_question', 'type_of_starter']]
X_val = X_val[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'total_q_user',
'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part',
'part_1', 'part_2', 'part_3', 'part_4', 'part_5', 'part_6', 'part_7',
'type_of_concept', 'type_of_intention', 'type_of_solving_question', 'type_of_starter']]
# Filling with 0.65 for simplicity; there could likely be a better value
X['answered_correctly_user'].fillna(0.65, inplace=True)
X['explanation_mean_user'].fillna(prior_mean_user, inplace=True)
X['quest_pct'].fillna(content_mean, inplace=True)
X['part'].fillna(4, inplace = True)
X['total_q_user'].fillna(X['total_q_user'].mean(), inplace = True)
X['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True)
X['prior_question_had_explanation_enc'].fillna(0, inplace = True)
X['part_1'].fillna(0, inplace = True)
X['part_2'].fillna(0, inplace = True)
X['part_3'].fillna(0, inplace = True)
X['part_4'].fillna(0, inplace = True)
X['part_5'].fillna(0, inplace = True)
X['part_6'].fillna(0, inplace = True)
X['part_7'].fillna(0, inplace = True)
X['type_of_concept'].fillna(0, inplace = True)
X['type_of_intention'].fillna(0, inplace = True)
X['type_of_solving_question'].fillna(0, inplace = True)
X['type_of_starter'].fillna(0, inplace = True)
X_val['answered_correctly_user'].fillna(0.65, inplace=True)
X_val['explanation_mean_user'].fillna(prior_mean_user, inplace=True)
X_val['quest_pct'].fillna(content_mean, inplace=True)
X_val['part'].fillna(4, inplace = True)
X_val['total_q_user'].fillna(X['total_q_user'].mean(), inplace = True)
X_val['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True)
X_val['prior_question_had_explanation_enc'].fillna(0, inplace = True)
X_val['part_1'].fillna(0, inplace = True)
X_val['part_2'].fillna(0, inplace = True)
X_val['part_3'].fillna(0, inplace = True)
X_val['part_4'].fillna(0, inplace = True)
X_val['part_5'].fillna(0, inplace = True)
X_val['part_6'].fillna(0, inplace = True)
X_val['part_7'].fillna(0, inplace = True)
X_val['type_of_concept'].fillna(0, inplace = True)
X_val['type_of_intention'].fillna(0, inplace = True)
X_val['type_of_solving_question'].fillna(0, inplace = True)
X_val['type_of_starter'].fillna(0, inplace = True)
import lightgbm as lgb
params = {
'objective': 'binary',
'seed': 42,
'metric': 'auc',
'learning_rate': 0.075,
'max_bin': 800,
'num_leaves': 80
}
lgb_train = lgb.Dataset(X, y, categorical_feature = ['part', 'prior_question_had_explanation_enc'])
lgb_eval = lgb.Dataset(X_val, y_val, categorical_feature = ['part', 'prior_question_had_explanation_enc'], reference=lgb_train)
model = lgb.train(
params,
lgb_train,
valid_sets=[lgb_train, lgb_eval],
verbose_eval=100,
num_boost_round=10000,
early_stopping_rounds=10
)
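# Sketch: with early stopping, the prediction can be pinned to the best
# iteration explicitly (LightGBM already does this by default once early
# stopping has fired):
# y_pred = model.predict(X_val, num_iteration=model.best_iteration)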
y_pred = model.predict(X_val)
y_true = np.array(y_val)
roc_auc_score(y_true, y_pred) | aigerimb/Time-Series-Analysis- | riid_prediction.py | riid_prediction.py | py | 9,419 | python | en | code | 0 | github-code | 50 |
24228282534 | from datetime import date
from typing import List, Dict, Union
import requests
import pandas as pd
class Wig20Scraper:
def __init__(self, from_date: date, to_date: date) -> None:
self.from_date = from_date
self.to_date = to_date
def build_url(self) -> str:
base: str = "https://gpwbenchmark.pl/chart-json.php"
from_date_str = self.from_date.strftime("%Y-%m-%d")
to_date_str = self.to_date.strftime("%Y-%m-%d")
req: str = "[{%22isin%22:%22PL9999999987%22,%22mode%22:%22RANGE%22,%22from%22:%22" + \
from_date_str + \
"%22,%22to%22:%22" + \
to_date_str + \
"%22}]"
return base + "?req=" + req
def get_data(self) -> List[Dict[str, float]]:
url = self.build_url()
response = requests.get(url)
return response.json()[0]["data"]
def parse_data(data: List[Dict[str, float]]) -> pd.DataFrame:
data_dict: Dict[str, List[Union[float, date]]] = {
"date": [],
"open": [],
"close": [],
"min": [],
"max": []
}
for item in data:
data_dict["date"].append(date.fromtimestamp(item["t"]))
data_dict["open"].append(item["o"])
data_dict["close"].append(item["c"])
data_dict["min"].append(item["l"])
data_dict["max"].append(item["h"])
df = pd.DataFrame(data_dict)
df.set_index("date", inplace=True)
df = df.asfreq('B', method="ffill")
return df
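# Sketch of downstream use (not part of the scraper itself): with the
# business-day index in place, daily returns are simply
#   returns = wig20_df["close"].pct_change().dropna()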
if __name__ == "__main__":
start = date(2021, 7, 5)
end = date(2022, 7, 5)
scraper = Wig20Scraper(start, end)
data = scraper.get_data()
print(data)
wig20_df = parse_data(data)
print(wig20_df) | AleksanderWWW/wig20-prediction-app | app/src/extract.py | extract.py | py | 1,734 | python | en | code | 1 | github-code | 50 |
40136635880 | #!/usr/bin/env python3
from __future__ import print_function
import sys
from ROOT import *
import os
from subprocess import call
import os.path
import shutil
import subprocess
import codecs
import re
import errno
from getGTfromDQMFile_V2 import getGTfromDQMFile
def setRunDirectory(runNumber):
# Don't forget to add an entry when there is a new era
dirDict = { 325799:['Data2018', 'HIRun2018'],\
315252:['Data2018', 'Run2018'],\
308336:['Data2018', 'Commissioning2018'],\
294644:['Data2017', 'Run2017'],\
290123:['Data2017', 'Commissioning2017'],\
284500:['Data2016', 'PARun2016'],\
271024:['Data2016', 'Run2016'],\
264200:['Data2016', 'Commissioning2016'],\
246907:['Data2015', 'Run2015'],\
232881:['Data2015', 'Commissioning2015'],\
211658:['Data2013', 'Run2013'],\
209634:['Data2013', 'HIRun2013'],\
190450:['Data2012', 'Run2012']}
runKey=0
for key in sorted(dirDict):
if runNumber > key:
runKey=key
return dirDict[runKey]
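# Example (sketch): setRunDirectory(316000) falls into the 315252 bucket
# and returns ['Data2018', 'Run2018'].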
def downloadOfflineDQMhisto(run, Run_type,rereco):
runDirval=setRunDirectory(run)
DataLocalDir=runDirval[0]
DataOfflineDir=runDirval[1]
    nnn = run // 100  # integer division: this value is embedded in the URL path
print('Processing '+ Run_type + ' in '+DataOfflineDir+"...")
File_Name = ''
print('Directory to fetch the DQM file from: https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/')
url = 'https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/'
os.popen("curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET "+url+" > index.html")
f=codecs.open("index.html", 'r')
index = f.readlines()
    # use the function's `run` parameter rather than the global Run_Number[i]
    if any(str(run) in s for s in index):
        for s in index:
            if rereco:
                if (str(run) in s) and ("__DQMIO.root" in s) and ("17Sep2018" in s):
                    File_Name = str(str(s).split("xx/")[1].split("'>DQM")[0])
            else:
                if (str(run) in s) and ("__DQMIO.root" in s):
                    File_Name = str(str(s).split("xx/")[1].split("'>DQM")[0])
else:
print('No DQM file available. Please check the Offline server')
sys.exit(0)
print('Downloading DQM file:'+File_Name)
os.system('curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/'+File_Name+' > /tmp/'+File_Name)
return File_Name
def downloadOfflinePCLhisto(run, Run_type):
runDirval=setRunDirectory(run)
DataLocalDir=runDirval[0]
DataOfflineDir=runDirval[1]
    nnn = run // 100
print('Processing '+ Run_type + ' in '+DataOfflineDir+"...")
File_Name = 'Temp'
print('Directory to fetch the DQM file from: https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/')
url = 'https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/'
os.popen("curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET "+url+" > index.html")
f=codecs.open("index.html", 'r')
index = f.readlines()
    # use the function's `run` parameter rather than the global Run_Number[i]
    if any(str(run) in s for s in index):
        for s in index:
            if (str(run) in s) and ("PromptCalibProdSiPixel-Express" in s) and ("__ALCAPROMPT.root" in s):
                File_Name = str(str(s).split("xx/")[1].split("'>DQM")[0])
else:
print('No DQM file available. Please check the Offline server')
sys.exit(0)
if File_Name!='Temp':
print('Downloading DQM file:'+File_Name)
os.system('curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OfflineData/'+DataOfflineDir+'/'+Run_type+'/000'+str(nnn)+'xx/'+File_Name+' > /tmp/'+File_Name)
return File_Name
def downloadOnlineDQMhisto(run, Run_type):
runDirval=setRunDirectory(run)
DataLocalDir=runDirval[0]
DataOfflineDir=runDirval[1]
nnn=run/100
nnnOnline = run/10000
File_Name_online=''
deadRocMap = False
##################online file########
url1 = 'https://cmsweb.cern.ch/dqm/online/data/browse/Original/000'+str(nnnOnline)+'xxxx/000'+str(nnn)+'xx/'
os.popen("curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET "+url1+" > index_online.html")
url2 = 'https://cmsweb.cern.ch/dqm/offline/data/browse/ROOT/OnlineData/original/000'+str(nnnOnline)+'xxxx/000'+str(nnn)+'xx/'
os.popen("curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET "+url2+" > index_online_backup.html")
f_online_backup=codecs.open("index_online_backup.html", 'r')
index_online_backup = f_online_backup.readlines()
f_online=codecs.open("index_online.html", 'r')
index_online = f_online.readlines()
if any(str(run) in x for x in index_online):
for x in index_online:
if (str(run) in x) and ("_PixelPhase1_" in x):
File_Name_online=str(str(x).split(".root'>")[1].split("</a></td><td>")[0])
deadRocMap = True
else:
print("Can't find any file in offline server, trying the online server")
if any(str(run) in y for y in index_online_backup):
for y in index_online:
if (str(run) in y) and ("_PixelPhase1_" in y):
File_Name_online=str(str(y).split(".root'>")[1].split("</a></td><td>")[0])
deadRocMap = True
else:
print('No Online DQM file available. Skip dead roc map')
deadRocMap = False
print('Downloading DQM file:'+File_Name_online)
os.system('curl -k --cert /data/users/cctrkdata/current/auth/proxy/proxy.cert --key /data/users/cctrkdata/current/auth/proxy/proxy.cert -X GET https://cmsweb.cern.ch/dqm/online/data/browse/Original/000'+str(nnnOnline)+'xxxx/000'+str(nnn)+'xx/'+File_Name_online+' > /tmp/'+File_Name_online)
os.remove('index_online.html')
os.remove('index_online_backup.html')
    return deadRocMap, File_Name_online
def getGT(DQMfile, RunNumber, globalTagVar):
globalTag_v0 = getGTfromDQMFile(DQMfile, RunNumber, globalTagVar)
print("Global Tag: " + globalTag_v0)
globalTag = globalTag_v0
for z in range(len(globalTag_v0)-2):#clean up the garbage string in the GT
if (globalTag_v0[z].isdigit()) and (globalTag_v0[z+1].isdigit()) and (globalTag_v0[z+2].isdigit()) and(globalTag_v0[z+3].isupper()):
globalTag = globalTag_v0[z:]
break
if globalTag == "":
print(" No GlobalTag found: trying from DAS.... ")
globalTag = str(os.popen('getGTscript.sh '+filepath+ File_Name+' ' +str(Run_Number[i])));
if globalTag == "":
print(" No GlobalTag found for run: "+str(Run_Number[i]))
return globalTag
Run_type = sys.argv[1]
Run_Number = [int(x) for x in sys.argv[2:]]
CMSSW_BASE = str(os.popen('echo ${CMSSW_BASE}').read().strip())
rereco=False
###########Check if user enter the right run type######################
if Run_type == 'Cosmics' or Run_type == 'StreamExpress' or Run_type == 'StreamExpressCosmics' or Run_type == 'ZeroBias' or Run_type == 'StreamHIExpress' or Run_type == 'HIMinimumBias1' or re.match('ZeroBias([0-9]+?)',Run_type) or re.match('HIMinimumBias([0-9]+?)',Run_type):
print(Run_type)
elif Run_type == 'ReReco':
rereco=True
Run_type='ZeroBias'
else:
print("please enter a valid run type: Cosmics | ZeroBias | StreamExpress | StreamExpressCosmics ")
sys.exit(0)
for i in range(len(Run_Number)):
#################Downloading DQM file############################
print("Downloading File!!")
    nnnOut = Run_Number[i] // 1000
filepath = '/tmp/'
File_Name = downloadOfflineDQMhisto(Run_Number[i], Run_type, rereco)
if Run_type=="StreamExpress" or Run_type=="StreamHIExpress":
File_Name_PCL = downloadOfflinePCLhisto(Run_Number[i], Run_type)
    deadRocMap, File_Name_online = downloadOnlineDQMhisto(Run_Number[i], Run_type)
runDirval=setRunDirectory(Run_Number[i])
DataLocalDir=runDirval[0]
#DataOfflineDir=runDirval[1]
################Check if run is complete##############
print("get the run status from DQMFile")
check_command = 'check_runcomplete '+filepath+File_Name
Check_output = subprocess.call(check_command, shell=True)
if Check_output == 0:
print('Using DQM file: '+File_Name)
else:
print('*****************Warning: DQM file is not ready************************')
input_var = input("DQM file is incompleted, do you want to continue? (y/n): ")
if (input_var == 'y') or (input_var == 'Y'):
print('Using DQM file: '+File_Name)
else:
sys.exit(0)
if Run_type=="StreamExpress" or Run_type=="StreamHIExpress":
if File_Name_PCL=='Temp':
print(' ')
print(' ')
print('*****************Warning: PCL file is not ready************************')
input_var = input("PCL file is not ready, you will need to re-run the script later for PCL plots, do you want to continue? (y/n): ")
if (input_var == 'y') or (input_var == 'Y'):
print('--------> Remember to re-run the script later!!!!!')
else:
sys.exit(0)
###################Start making TkMaps################
checkfolder = os.path.exists(str(Run_Number[i]))
if checkfolder == True:
shutil.rmtree(str(Run_Number[i]))
os.makedirs(str(Run_Number[i])+'/'+Run_type)
else:
os.makedirs(str(Run_Number[i])+'/'+Run_type)
#######Getting GT##############
####After switch production to 10_X_X release, the clean up section need to be reviewed and modified ##########
globalTag = getGT(filepath+File_Name, str(Run_Number[i]), 'globalTag_Step1')
####################################################
print(" Creating the TrackerMap.... ")
detIdInfoFileName = 'TkDetIdInfo_Run'+str(Run_Number[i])+'_'+Run_type+'.root'
workPath = os.popen('pwd').readline().strip()
os.chdir(str(Run_Number[i])+'/'+Run_type)
os.system('cmsRun ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/test/SiStripDQM_OfflineTkMap_Template_cfg_DB.py globalTag='+globalTag+' runNumber='+str(Run_Number[i])+' dqmFile='+filepath+'/'+File_Name+' detIdInfoFile='+detIdInfoFileName)
os.system('rm -f *svg')
####################### rename bad module list file######################
sefile = 'QualityTest_run'+str(Run_Number[i])+'.txt'
shutil.move('QTBadModules.log',sefile)
################### put color legend in the TrackerMap###################
# PLEASE UNCOMMENT THE LINES BELOW TO GET THE LEGEND ON THE QT TkMAP (it will work only on vocms061)
# os.system('/usr/bin/python ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/LegendToQT.py QTestAlarm.png /data/users/cctrack/FinalLegendTrans.png')
# shutil.move('result.png', 'QTestAlarm.png')
####################Copying the template html file to index.html########################
if Run_type == "Cosmics" or Run_type == "StreamExpressCosmics":
os.system('cat ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/data/index_template_TKMap_cosmics.html | sed -e "s@RunNumber@'+str(Run_Number[i])+'@g" > index.html')
elif Run_type == "StreamExpress":
os.system('cat ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/data/index_template_Express_TKMap.html | sed -e "s@RunNumber@'+str(Run_Number[i])+'@g" > index.html')
else:
os.system('cat ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/data/index_template_TKMap.html | sed -e "s@RunNumber@'+str(Run_Number[i])+'@g" > index.html')
shutil.copyfile(CMSSW_BASE+'/src/DQM/SiStripMonitorClient/data/fedmap.html','fedmap.html')
shutil.copyfile(CMSSW_BASE+'/src/DQM/SiStripMonitorClient/data/psumap.html','psumap.html')
print(" Check TrackerMap on "+str(Run_Number[i])+'/'+Run_type+" folder")
output =[]
output.append(os.popen("/bin/ls ").readline().strip())
print(output)
## Producing the list of bad modules
print(" Creating the list of bad modules ")
os.system('listbadmodule '+filepath+'/'+File_Name+' PCLBadComponents.log')
## if Run_type != "StreamExpress":
## shutil.copyfile(sefile, checkdir+'/'+sefile)
## os.system('/usr/bin/python ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/findBadModT9.py -p '+sefile+' -s /'+checkdir+'/'+sefile);
## Producing the run certification by lumisection
# print(" Creating the lumisection certification:")
# if (Run_type.startswith("ZeroBias")) or (Run_type == "StreamExpress"):
# os.system('ls_cert 0.95 0.95 '+filepath+'/'+File_Name)
## Producing the PrimaryVertex/BeamSpot quality test by LS..
# if (Run_type != "Cosmics") and ( Run_type != "StreamExpress") and (Run_type != "StreamExpressCosmics"):
# print(" Creating the BeamSpot Calibration certification summary:")
# os.system('lsbs_cert '+filepath+'/'+File_Name)
## .. and harvest the bad beamspot LS with automatic emailing (if in period and if bad LS found)
os.system('bs_bad_ls_harvester . '+str(Run_Number[i]))
## Producing the Module difference for ExpressStream
dest='Beam'
if (Run_type == "Cosmics") or (Run_type == "StreamExpressCosmics"):
dest="Cosmics"
## create merged list of BadComponent from (PCL, RunInfo and FED Errors) ignore for now
os.system('cmsRun ${CMSSW_BASE}/src/DQM/SiStripMonitorClient/test/mergeBadChannel_Template_cfg.py globalTag='+globalTag+' runNumber='+str(Run_Number[i])+' dqmFile='+filepath+'/'+File_Name)
shutil.move('MergedBadComponents.log','MergedBadComponents_run'+str(Run_Number[i])+'.txt')
os.system("mkdir -p /data/users/event_display/TkCommissioner_runs/"+DataLocalDir+"/"+dest+" 2> /dev/null")
shutil.copyfile(detIdInfoFileName,'/data/users/event_display/TkCommissioner_runs/'+DataLocalDir+'/'+dest+'/'+detIdInfoFileName)
os.remove(detIdInfoFileName)
os.remove('MergedBadComponentsTkMap_Canvas.root')
os.remove('MergedBadComponentsTkMap.root')
##############counting dead pixel#######################
print("countig dead pixel ROCs" )
if (Run_Number[i] < 290124) :
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/DeadROCCounter.py '+filepath+'/'+File_Name)
else:
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/DeadROCCounter_Phase1.py '+filepath+'/'+File_Name)
if rereco:
os.system('mkdir -p /data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/ReReco 2> /dev/null')
else:
os.system('mkdir -p /data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/'+Run_type+' 2> /dev/null')
shutil.move('PixZeroOccROCs_run'+str(Run_Number[i])+'.txt',workPath+'/PixZeroOccROCs_run'+str(Run_Number[i])+'.txt')
######Counting Dead ROCs and Inefficient DC during the run#########################
if deadRocMap == True:
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/DeadROC_duringRun.py '+filepath+File_Name_online+' '+filepath+File_Name)
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/change_name.py')
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/PixelMapPlotter.py MaskedROC_sum.txt -c')
os.remove('MaskedROC_sum.txt')
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/InefficientDoubleROC.py '+filepath+File_Name_online)
else:
print('No Online DQM file available, Dead ROC maps will not be produced')
print('No Online DQM file available, inefficient DC list will also not be produced')
#######Merge Dead ROCs and Occupoancy Plot ###################
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/MergeOccDeadROC.py '+filepath+File_Name)
#######Merge PCL and DQM Plot (only StreamExpress) ###################
if Run_type=="StreamExpress" or Run_type=="StreamHIExpress":
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/MergePCLDeadROC.py '+filepath+File_Name+' '+filepath+File_Name_PCL)
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/MergePCLFedErr.py '+filepath+File_Name+' '+filepath+File_Name_PCL)
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/PCLOthers.py '+filepath+File_Name+' '+filepath+File_Name_PCL)
###################copy ouput files###################
strip_files = os.listdir('.')
for file_name in strip_files:
full_stripfile_name = os.path.join('.', file_name)
if (os.path.isfile(full_stripfile_name)):
if rereco:
shutil.copy(full_stripfile_name, '/data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/ReReco')
else:
shutil.copy(full_stripfile_name, '/data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/'+Run_type)
#########################Start making pixel maps#####################
os.chdir(workPath)
shutil.rmtree(str(Run_Number[i]))
os.remove('index.html')
# produce pixel phase1 TH2Poly maps
# os.chdir(CMSSW_BASE+'/src/DQM/SiStripMonitorClient/scripts/PhaseIMaps/')
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/TH2PolyOfflineMaps.py ' + filepath+'/'+File_Name+' 3000 2000')
shutil.move(workPath+'/PixZeroOccROCs_run'+str(Run_Number[i])+'.txt', 'OUT/PixZeroOccROCs_run'+str(Run_Number[i])+'.txt')
###################copy ouput files##########
pixel_files = os.listdir('./OUT')
for file_name in pixel_files:
full_pixelfile_name = os.path.join('./OUT/', file_name)
if (os.path.isfile(full_pixelfile_name)):
if rereco:
shutil.copy(full_pixelfile_name, '/data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/ReReco')
else:
shutil.copy(full_pixelfile_name, '/data/users/event_display/'+DataLocalDir+'/'+dest+'/'+str(nnnOut)+'/'+str(Run_Number[i])+'/'+Run_type)
shutil.rmtree('OUT')
# produce pixel phase1 tree for Offline TkCommissioner
pixelTreeFileName = 'PixelPhase1Tree_Run'+str(Run_Number[i])+'_'+Run_type+'.root'
os.system('${CMSSW_BASE}/src/DQM/SiStripMonitorClient/scripts/PhaseITreeProducer.py ' + filepath+'/'+File_Name + ' ' + pixelTreeFileName)
shutil.copyfile(pixelTreeFileName,'/data/users/event_display/TkCommissioner_runs/'+DataLocalDir+'/'+dest+'/'+pixelTreeFileName)
os.remove(pixelTreeFileName)
if File_Name:
os.remove(filepath+File_Name)
if File_Name_online:
os.remove(filepath+File_Name_online)
os.chdir(workPath)
| cms-sw/cmssw | DQM/SiStripMonitorClient/scripts/TkMap_script_phase1.py | TkMap_script_phase1.py | py | 19,429 | python | en | code | 985 | github-code | 50 |
73709087194 | from django.shortcuts import render_to_response
from django.template import loader,Context
from django.http import HttpResponse
from datetime import datetime
import student as stdent
import game
import django.utils.simplejson as json
def question(request,op):
q = dict()
q['first'] = '3'
q['second'] = '4'
q['question_key'] = question.pk
return HttpResponse(json.dumps(q))
def ask(request):
temp = loader.get_template('question.svg')
#context = Context({'question': question })
return render_to_response(temp.render(Context({})))
def store_answer_and_ask_new_question(request):
# store answer
student = stdent.models.Student.objects.get(user=request.user)
question = game.models.Question.objects.get(pk=request.POST['question_key'])
answer = game.models.Answer()
answer.student = student
answer.question = question
answer.value = request.POST['answer']
answer.time_taken = datetime.now() - datetime.now()
answer.correct = answer.value == answer.question.correct_answer
answer.save()
return ask(question.select_question(student))
| bernardokyotoku/skillplant | game/views.py | views.py | py | 1,053 | python | en | code | 1 | github-code | 50 |
16906175053 | from home.models import Urls
from django.db.models import Q
def buscador(request):
"""CARREGA O BUSCADOR NO TEMPLATE BASE"""
if 'search' in request.GET:
search = request.GET['search']
else:
search = ''
if search == '':
urls = Urls.objects.all().distinct()
else:
urls = Urls.objects.filter(Q(urls__icontains=search) | Q(nome__icontains=search) | Q(descricao__icontains=search)).order_by('nome')
return {'urls' : urls, 'search':search}
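# Sketch: to take effect, these functions must be listed in settings.py under
# TEMPLATES[0]['OPTIONS']['context_processors'] (dotted paths assumed):
#   'apps.processors.context_processors.buscador',
#   'apps.processors.context_processors.menu',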
def menu(request):
"""CARREGA AS PÁGINAS NO MENU"""
clientes = Urls.objects.filter(grupo='clientes').distinct()
pacientes = Urls.objects.filter(grupo='pacientes').distinct()
administrativo = Urls.objects.filter(grupo='administrativo').distinct()
parceiros = Urls.objects.filter(grupo='parceiros').distinct()
agendamentos = Urls.objects.filter(grupo='agendamentos').distinct()
return {'clientes' : clientes,
'pacientes' : pacientes,
'administrativo' : administrativo,
'parceiros' : parceiros,
'agendamentos' : agendamentos
} | RafaelMunareto/vet_system_django_puro | vet_system_django_puro/apps/processors/context_processors.py | context_processors.py | py | 1,154 | python | en | code | 0 | github-code | 50 |
27821371989 | from PIL import Image as img
import math
import json
def crop_image(filename):
# This function is called from the main.py and crops the image.
# Once the image is cropped, it is saved to a file on the users
# network.
foto = img.open(filename)
size = width, height = foto.size
foto_stdr_save_location = 'standard_save_location/' # Standard folder to save cropped image to if network folder is not set
# crop with pillow tuple, (x1, y1, x2, y2)
toppixel = -5 # y1
leftpixel = -5 # x1
rightpixel = -5 # x2
bottomrow = size[1] # y2
with open('config.json', 'r') as crop_config: # load configuration from config.json
data = json.load(crop_config)
padding = data['crop_config']['padding'] # user config, in px
thresholdValue = data['crop_config']['thresholdValue'] # user config, color must be atleast this % different
fust_color = data['crop_config']['fust_color'] # user config, color of a fust in RGB
color_differentiation = data['crop_config']['color_differentiation'] # user config, as % color differentiation
fust_edge_length = data['crop_config']['fust_edge_length'] # user config, length of detection for fust
ratio = data['crop_config']['ratio'] # user config, image output ratio
network_folder = data['crop_config']['network_folder'] # user config, save location for cropped images
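        # Sketch of the expected config.json shape (keys taken from the reads
        # above; the values shown are illustrative, not from the source):
        # {"crop_config": {"padding": 10, "thresholdValue": 20,
        #   "fust_color": [139, 69, 19], "color_differentiation": 0.05,
        #   "fust_edge_length": 50, "ratio": [4, 3], "network_folder": ""}}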
threshold = 255 * ( 1 - (thresholdValue / 100) )
max_color = max(1, fust_color[0],fust_color[1], fust_color[2])
color_relation = (fust_color[0]/max_color,fust_color[1]/max_color, fust_color[2]/max_color)
fust_edge_counter = 0
foto_rgb_list = list(foto.getdata())
# find left, right and top pixels that are under the threshold value
for i in range(len(foto_rgb_list)):
y = math.floor(i / size[0])
x = i - y * size[0]
pixel = foto_rgb_list[i]
for p in range(0,3):
if pixel[p] <= threshold:
if toppixel < 0:
toppixel = y
if leftpixel < 0 or x < leftpixel:
leftpixel = x
if rightpixel < 0 or x > rightpixel:
rightpixel = x
break
if bottomrow == size[1]:
delta_max = max(1, pixel[0], pixel[1], pixel[2])
delta_color = (pixel[0]/delta_max,pixel[1]/delta_max, pixel[2]/delta_max)
count = 0
for p in range(0,3):
if delta_color[p] <= color_relation[p] + color_differentiation and delta_color[p] >= color_relation[p] - color_differentiation:
count += 1
if count == 3:
fust_edge_counter += 1
else:
fust_edge_counter = 0
if fust_edge_counter > fust_edge_length:
bottomrow = y
elif leftpixel >= 0 and rightpixel >= 0: # ignore inbetween space
# i += rightpixel - leftpixel
break
# Adding padding for cropped image
leftpixel -= padding
rightpixel += padding
toppixel -= padding
bottomrow += padding
# Add padding for ratio
crop_width = rightpixel - leftpixel
crop_height = bottomrow - toppixel
crop_ratio = crop_width / crop_height
user_ratio = ratio[0] / ratio[1]
print('crop r', crop_ratio)
print('user r', user_ratio)
delta_ratio = crop_ratio - user_ratio
if delta_ratio > 0:
p = crop_height * delta_ratio
print('delta r > 0, p = ',p)
toppixel -= math.floor(p/2)
bottomrow += math.ceil(p/2)
elif delta_ratio < 0:
p = crop_width * -delta_ratio
print('delta r < 0, p = ',p)
rightpixel += math.floor(p/2)
leftpixel -= math.ceil(p/2)
if leftpixel < 0:
leftpixel = 0
if rightpixel > size[0]-1 or rightpixel < 0:
rightpixel = size[0]-1
if toppixel < 0:
toppixel = 0
if bottomrow > size[1]:
bottomrow = size[1]
foto_cropped = foto.crop((leftpixel, toppixel, rightpixel, bottomrow))
if not network_folder:
foto_cropped.save(foto_stdr_save_location + filename)
else:
foto_cropped.save(network_folder + filename) | roytouw/bloemenfotografie | crop_foto.py | crop_foto.py | py | 4,414 | python | en | code | 0 | github-code | 50 |
23997195357 | import bpy
import os
class Operator_BlenRig5_Add_Biped(bpy.types.Operator):
bl_idname = "blenrig5.add_biped_rig"
bl_label = "BlenRig 5 Add Biped Rig"
bl_description = "Generates BlenRig 5 biped rig"
bl_options = {'REGISTER', 'UNDO',}
@classmethod
def poll(cls, context): #method called by blender to check if the operator can be run
return bpy.context.scene != None
def import_blenrig_biped(self, context):
CURRENT_DIR = os.path.dirname(__file__)
filepath = os.path.join(CURRENT_DIR, "blenrig_biped.blend")
group_name = "blenrig_biped"
scene = bpy.context.scene
# append all groups from the .blend file
with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
## all groups
# data_to.groups = data_from.groups
# only append a single group we already know the name of
data_to.groups = [group_name]
# add the group instance to the scene
for group in data_to.groups:
for ob in group.objects:
scene.objects.link(ob)
#assign layers
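                # Blender 2.7x layer masks are 20-element boolean lists;
                # `[(x in [19]) for x in range(20)]` is True only at index 19,
                # i.e. it places the object on layer 20 alone.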
if ob.type == 'MESH':
ob.layers = [(x in [19]) for x in range(20)]
if 'BlenRig_mdef_cage' in ob.name:
ob.layers = [(x in [11]) for x in range(20)]
if 'BlenRig_proxy_model' in ob.name:
ob.layers = [(x in [1]) for x in range(20)]
if ob.type == 'LATTICE':
ob.layers = [(x in [12]) for x in range(20)]
if ob.type == 'ARMATURE':
ob.layers = [(x in [10]) for x in range(20)]
bpy.context.scene.layers[10] = True
# Commented because it caused an error when in object local view and when lock_camera_and_layers is false
# if 'biped_blenrig' in ob.name:
# bpy.context.scene.objects.active = ob
# bpy.ops.object.mode_set(mode='POSE')
def execute(self, context):
self.import_blenrig_biped(context)
        return {'FINISHED'} | JT-a/blenderpython279 | scripts/addons_extern/BlenRig5/blenrig_biped/ops_blenrig_biped_add.py | ops_blenrig_biped_add.py | py | 2,295 | python | en | code | 5 | github-code | 50 |
74974812315 | import sublime
import sublime_plugin
try:
from is_pretext_file import is_pretext_file
except ImportError:
from .is_pretext_file import is_pretext_file
PRETEXT_SYNTAX = 'Packages/PreTeXtual/PreTeXt.sublime-syntax'
class PretextSyntaxListener(sublime_plugin.EventListener):
def on_load_async(self, view):
self.detect_and_apply_syntax(view)
def on_post_save_async(self, view):
self.detect_and_apply_syntax(view)
    def detect_and_apply_syntax(self, view):
        if view.is_scratch() or not view.file_name():
            return
        # already highlighted as PreTeXt; nothing to do
        if view.score_selector(0, "text.xml.pretext"):
            return
        file_name = view.file_name()
        if is_pretext_file(file_name):
            view.set_syntax_file(PRETEXT_SYNTAX)
| daverosoff/PreTeXtual | pretextSyntaxListener.py | pretextSyntaxListener.py | py | 765 | python | en | code | 1 | github-code | 50 |
37211511047 | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
months = ['january', 'february', 'march', 'april', 'may', 'june']
days= ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
city = input("Please enter the name of a city from one of those cities [chicago, new york city, washington] ").lower().strip()
if city not in CITY_DATA:
print("Invalid input! Please enter the name of one of the stated cities")
continue
else:
break
# TO DO: get user input for month (all, january, february, ... , june)
while True:
month= input('Which month to filter by [January, February, March, April, May, June] or type all to apply no filter: ')
month=month.lower().strip()
if month not in months and month !='all':
print("Invalid input! Please enter a month from [January to June] or type all to apply no filter")
continue
else:
break
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
while True:
day= input('which day of week to filter by [Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday], or "all" to apply no day filter: ').lower().strip()
if day not in days and day!='all':
print("Invalid input! Please enter a day from [Sunday to Saturday] or type all to apply no filter")
continue
else:
break
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.strftime("%A")
df['day_of_week']=df['day_of_week'].str.lower()
#calendar.day_name[(df['Start Time'].dt.week)]
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month']==month]
#print(df.head())
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week']==day]
return df
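# Example (sketch): load_data('chicago', 'march', 'friday') keeps only
# Chicago trips that started on a Friday in March; passing 'all' for month
# or day disables that filter.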
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# TO DO: display the most common month
most_common_month=df['month'].mode()[0]
print("most common month is {}".format(most_common_month))
# TO DO: display the most common day of week
most_common_day=df['day_of_week'].mode()[0]
print("most common day of week is {}".format(most_common_day))
# TO DO: display the most common start hour
most_common_start_hour=df['Start Time'].dt.hour.mode()[0]
print("most common start hour is {}".format(most_common_start_hour))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# TO DO: display most commonly used start station
most_common_start_station=df['Start Station'].mode()[0]
print("most common used start station is {}".format(most_common_start_station))
# TO DO: display most commonly used end station
most_common_end_station=df['End Station'].mode()[0]
print("most common used end station is {}".format(most_common_end_station))
# TO DO: display most frequent combination of start station and end station trip
most_common_start_end_station=(df['Start Station']+'--'+df['End Station']).mode()[0]
print("most most frequent combination of start station and end station trip is {}".format(most_common_start_end_station))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# TO DO: display total travel time
total_travel_time=df['Trip Duration'].sum()
print("total travel time is {} ,".format(total_travel_time))
# TO DO: display mean travel time
mean_travel_time=df['Trip Duration'].mean()
print("average travel time is {}".format(mean_travel_time))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
# TO DO: Display counts of user types
unique_user_type_list=df["User Type"].value_counts()
print("count of user types is \n")
for user_type,count in unique_user_type_list.iteritems():
print(str(user_type)+" "+str(count))
if 'Gender' not in df:
print("There are no Gender and Birth information for Washington")
return
# TO DO: Display counts of gender
gender_list=df["Gender"].value_counts()
print("count of user types is \n")
for gender,count in gender_list.iteritems():
print(str(gender)+" "+str(count))
# TO DO: Display earliest, most recent, and most common year of birth
earlist_year_of_birth=df['Birth Year'].min()
print("Earlist year of birth is {}".format(earlist_year_of_birth))
most_recent_year_of_birth=df['Birth Year'].max()
print("most recent year of birth is {}".format(most_recent_year_of_birth))
most_common_year_of_birth=df['Birth Year'].mode()[0]
print("most common year of birth is {}".format(most_common_year_of_birth))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_data(df):
idx=0
while(True):
view_more_data=input('\nWould you like to view 5 rows of individual trip data? Enter yes or no\n').lower().strip()
if view_more_data=="yes":
print(df.iloc[idx:idx+5])
idx+=5
elif view_more_data=="no":
break
else:
print("Invalid Input! Please enter Yes or No\n")
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
display_data(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
| omarsherif200/Udacity_Data_Analysis_Nanodegree | Explore US BikeShare Dataset/bikeshare.py | bikeshare.py | py | 8,128 | python | en | code | 0 | github-code | 50 |
75027697113 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from lib.lib_config import get_config_var
from lib.log import log
import logging
from subprocess import Popen, PIPE, STDOUT, call
import sys
import os
level = get_config_var("log_level")
log_path = get_config_var("log_path")
log("skstack.log", level, log_path)
def adv_task_step(hosts,env,project,task_file,forks=30):
proj_base_dir = get_config_var("pro_path")
task_file_abs = proj_base_dir+env+"/"+project+"/"+task_file
if hosts == "localhost":
cmd = "bash" + " " + task_file_abs
else:
cmd = "ansible %s -f %s -m script -a %s" % (hosts,forks,task_file_abs)
    retcode = -1  # so the status check below cannot hit a NameError if Popen raises
    try:
        pcmd = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        retcode = pcmd.wait()
        ret_message = pcmd.communicate()
        if retcode != 0:
            logging.error(ret_message)
        else:
            logging.info(ret_message)
    except:
        exinfo = sys.exc_info()
        logging.error(exinfo)
    if retcode == 0:
        ret_message = "success"
    else:
        ret_message = "failed"
    return ret_message
def release_project(project,env,hosts,release_dir,release_to):
release_path = get_config_var("release_path")
project_dir = release_path+env+"/"+project+"/"
release_palybook=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ "/scripts/skdeploy/release_project.yml"
cmd= "ansible-playbook %s -e 'h=%s project_dir=%s release_dir=%s'" % (release_palybook,hosts,project_dir, release_dir)
    retcode = -1  # avoid NameError below if Popen raises
    try:
pcmd = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
retcode=pcmd.wait()
ret_message = pcmd.communicate()
if retcode != 0:
logging.error(ret_message)
else:
logging.info(ret_message)
except:
exinfo=sys.exc_info()
logging.error(exinfo)
if retcode==0:
ret_message="success"
else:
ret_message="failed"
return ret_message
def change_link(hosts,release_dir,release_to):
change_link_palybook=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ "/scripts/skdeploy/change_link.yml"
cmd= "ansible-playbook %s -f 30 -e 'h=%s release_dir=%s release_to=%s'" % (change_link_palybook,hosts, release_dir,release_to)
    retcode = -1  # avoid NameError below if Popen raises
    try:
pcmd = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
retcode=pcmd.wait()
ret_message = pcmd.communicate()
if retcode != 0:
logging.error(ret_message)
else:
logging.info(ret_message)
except:
exinfo=sys.exc_info()
logging.error(exinfo)
if retcode==0:
ret_message="success"
else:
ret_message="failed"
return ret_message
def uni_to_str(args):
obj_uni = args
obj_list = eval(obj_uni)
obj_str=""
for h in obj_list:
h=h.strip()
obj_str=obj_str+h+","
return obj_str
def create_release_path(hosts,path):
cmd= "ansible %s -m shell -a 'mkdir -p %s'" % (hosts,path)
    retcode = -1  # avoid NameError below if Popen raises
    try:
pcmd = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
retcode=pcmd.wait()
ret_message = pcmd.communicate()
if retcode != 0:
logging.error(ret_message)
else:
logging.info(ret_message)
except:
exinfo=sys.exc_info()
logging.error(exinfo)
if retcode==0:
ret_message="success"
else:
ret_message="failed"
return ret_message
def var_change(text, **pDic):
    # substitute every "{key}" placeholder with its value from pDic
    # (parameter renamed from `str` to avoid shadowing the builtin)
    for key in ("repo_path", "pre_release_path", "release_to", "release_lib",
                "project", "env", "repo_url", "release_user"):
        text = text.replace("{%s}" % key, pDic[key])
    return text
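# Example (illustrative values only):
#   var_change("cp -r {repo_path} {pre_release_path}/{project}",
#              repo_path="/srv/repo", pre_release_path="/srv/pre",
#              project="yyappgw", env="prod", release_to="", release_lib="",
#              repo_url="", release_user="deploy")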
if __name__ == "__main__":
result_pre_deploy = adv_task_step(hosts="localhost", env="prod", project="yyappgw", task_file="post_deploy.sh")
print(result_pre_deploy)
print(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+ "/script/skdeploy/release_project.yml")
| encodingl/skstack | lib/lib_skdeploy.py | lib_skdeploy.py | py | 4,687 | python | en | code | 4 | github-code | 50 |
38324008926 | import pandas as pd
import pickle
import re
import string
import nltk
from nltk.corpus import wordnet
from wordcloud import WordCloud
from nltk.stem import WordNetLemmatizer
import scattertext as st
import spacy
import numpy as np
import pyLDAvis
import pyLDAvis.sklearn
from textblob import TextBlob
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation, NMF
democrats = ["Pete Buttigieg", "Joe Biden", "Amy Klobuchar", "Bernie Sanders", "Elizabeth Warren", "Andrew Yang"]
add_stop_words = ['just', 'like', 'got', 'things', 'thing', 'thats', 'know', 'said', 'going', 'dont', 'sure', 'mr', 'let', 'gon', 'na', 'say', 'want', 'year', 'time', 'end', 'way', 'talk', 'ive', 'im', 'tell', 'think', 'lot', 'mean', 'day', 'make', 'wait', 'right', 'youre', 'come', 'bring', 'theyre', 'ready', 'yeah', 'yes', 'buttigieg', 'klobuchar', 'yang', 'sander', 'warren', 'biden', 'people', 'country', 'oh', 'in', 'aa']
# Reads all the pickles of the democrats and puts the text into a list
def read_pickle_transcript():
data = {}
for i, c in enumerate(democrats):
with open("transcripts/" + c + ".txt", "rb") as file:
data[c] = pickle.load(file)
return data
# Combies all the text
def combine_text(list_of_text):
'''Takes a list of text and combines them into one large chunk of text.'''
combined_text = ''.join(list_of_text)
return combined_text
# Creates a dataframe of the transcripts
def create_dataframe_transcripts(data):
data_combined = {key: [combine_text(value)] for (key, value) in data.items()}
pd.set_option('max_colwidth',150)
democrats.sort()
df = pd.DataFrame.from_dict(data_combined).transpose()
df.columns = ['transcript']
df = df.sort_index()
df['politician'] = democrats
return df
# "Cleans" the data (removes unnecessary stuff)
def clean_data(text):
    """Clean the text: lower-case it, strip punctuation, words containing
    digits, smart quotes, control characters and any remaining non a-z
    characters, then collapse repeated whitespace."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\w*\d\w*', '', text)
    text = re.sub('[‘’“”…]', '', text)
    text = re.sub('\n', ' ', text)
    text = re.sub('\t', '', text)
    text = re.sub('\b', '', text)  # '\b' here is the backspace control character, not a regex word boundary
    text = re.sub('[^a-z ]+', '', text)
    text = re.sub(r'\s\s+', ' ', text)
    return text
# For lemmatization we use WordNet, but we need to POS-tag the tokenized
# words for a more accurate lemmatization
def get_wordnet_pos(word):
"""Map POS tag to first character lemmatize() accepts"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
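# Illustrative note (added): with NLTK's 'averaged_perceptron_tagger' data
# available, get_wordnet_pos('running') maps tag 'VBG' -> wordnet.VERB, and
# any tag outside the J/N/V/R map falls back to wordnet.NOUN, so lemmatize()
# always receives a valid part of speech.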
# Lemmatizes a transcript
def lemmatize_transcript(text):
''' Tokenizes text and for each tokenized word, it applies a POS-tag and lemmatizes the word
Returns the lemmatized output as a string
'''
lemmatizer = WordNetLemmatizer()
lemmatized_output = ' '.join([lemmatizer.lemmatize(word, get_wordnet_pos(word)) for word in nltk.word_tokenize(text)])
return lemmatized_output
# Puts the data in a dataframe
def put_in_dataframe(data_combined):
pd.set_option('max_colwidth',150)
democrats.sort()
df = pd.DataFrame.from_dict(data_combined).transpose()
df.columns = ['transcript']
df = df.sort_index()
df['politician'] = democrats
return df
# Converts from corpus to pandas dataframe
def convert_dataframe(cv, stop_words, data_clean_lemma):
data_cv = cv.fit_transform(data_clean_lemma.transcript)
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_clean_lemma.index
return data_dtm
# Reads the pickle dtm.pkl
def read_data():
data = pd.read_pickle('dtm.pkl')
data = data.transpose()
return data
def run():
# Sets the data from the pickle
data = read_pickle_transcript()
# Combines the data
data_combined = {key: [combine_text(value)] for (key, value) in data.items()}
# Data is put in a pandas dataframe, this form is called Corpus
df = put_in_dataframe(data_combined)
# Cleans data
data_clean = pd.DataFrame(df.transcript.apply(lambda x: clean_data(x)))
# Lemmatizes the transcript
data_clean_lemma = pd.DataFrame(data_clean.transcript.apply(lambda x: lemmatize_transcript(x)))
# Adds the stop words manually
stop_words = text.ENGLISH_STOP_WORDS.union(add_stop_words)
# Vectorizes the stop words
cv = CountVectorizer(stop_words=stop_words)
# Sets the format from corpus to another dataframe (1 coloumn for each word)
data_dtm = convert_dataframe(cv, stop_words, data_clean_lemma)
# Pickle the data
data_dtm.to_pickle("dtm.pkl")
# Pickle data_clean_lemme
data_clean_lemma.to_pickle('data_clean_lemma.pkl')
pickle.dump(cv, open("cv.pkl", "wb"))
# Reads the data from the pickle
data = read_data()
data_clean_lemma = pd.read_pickle('data_clean_lemma.pkl')
run() | WSHuusfeldt/PythonSem4Eksamen | Clean_data.py | Clean_data.py | py | 5,207 | python | en | code | 0 | github-code | 50 |
11283935248 | # Inheritance
# SUPERCLASS
class Producto:
def __init__(self, referencia, nombre, pvp, descripcion):
self.referencia = referencia
self.nombre = nombre
self.pvp = pvp
self.descripcion = descripcion
def __str__(self):
return """\
REFERENCIA\t{}
NOMBRE\t{}
PVP\t{}
DESCRIPCIÓN\t{}""".format(self.referencia, self.nombre, self.pvp, self.descripcion)
class Adorno(Producto):  # To mark it as a child of Producto we just name the parent and use pass
pass
class Alimento(Producto):  # New class with attributes not declared in __init__
productor = ""
distribuidor = ""
    def __str__(self):  # Define it again to add the new attributes
return """\
REFERENCIA\t{}
NOMBRE\t{}
PVP\t{}
DESCRIPCIÓN\t{}
PRODUCTOR\t{}
DISTRIBUIDOR {}""".format(self.referencia, self.nombre, self.pvp, self.descripcion, self.productor, self.distribuidor)
class Libro(Producto):
isbn = ""
autor = ""
def __str__(self):
return """\
REFERENCIA\t{}
NOMBRE\t{}
PVP\t{}
DESCRIPCIÓN\t{}
ISBN\t{}
AUTOR\t{}""".format(self.referencia, self.nombre, self.pvp, self.descripcion, self.isbn, self.autor)
# Instantiate the object
a = Adorno(2033, 'Vaso Adornado', 5, 'Vaso de pocerlana y marmol')
print("_________________________________________________________________")
# Instantiate the food item
al = Alimento(2888, "Botella de Aceite", 5, "250ml")
al.productor = "La Marca"
al.distribuidor = "Distribución S.A DE C.V"
print("_________________________________________________________________")
lib = Libro(8477, 'Things have gotten worse since we last spoke', 45, 'Libro sobre canibalismo')
lib.isbn = '3782BN'
lib.autor = 'UNKOWN'
print("_____________________Imprimimos los 3_________________________")
# Polimorfismo
productos = [a, al]
productos.append(lib)
for p in productos:
print(p, "\n")
# To display them using each class's own attributes
print("Mostrarlos por atributo con atributos generales \n")
for p in productos:
    if isinstance(p, Adorno):  # Check whether object {p} is of type Adorno
print(p.referencia, p.nombre)
elif isinstance(p, Alimento):
print(p.referencia, p.nombre, p.productor)
elif isinstance(p, Libro):
print(p.referencia, p.nombre, p.isbn)
print("\n Eso nos permitira mostrar todas las categorías \n")
# This lets us display all the categories
for p in productos:
print(p.referencia, p.nombre)
# We can write functions that handle instance attributes through polymorphism,
# as the following function shows: it receives objects of different classes and
# accesses the pvp attribute, taking for granted that it exists on all of them.
def rebajar_producto(p, rebaja):  # Returns a product with a percentage discount applied to its price
    # p is passed by reference (changing it here also changes the original)
p.pvp = p.pvp - (p.pvp / 100 * rebaja)
return p
print("\nAlimento modificado sus atributos\n")
alimentoRebajado = rebajar_producto(al, 10)
print(alimentoRebajado)
# COPYING AN OBJECT WE WANT TO MODIFY WITHOUT CHANGING THE ORIGINAL
# import copy
# copia_adorno = copy.copy(adorno)  # and we work with this copy instead
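# Minimal sketch (added): shallow-copy demonstration of the note above,
# reusing the Adorno instance `a` created earlier. Only the copy's price changes.
import copy
copia_adorno = copy.copy(a)
copia_adorno.pvp = 1
# a.pvp keeps its original value; copia_adorno.pvp is now 1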
| Nivaniz/Cursos | Python/GeneralCode/ComandosCatorce.py | ComandosCatorce.py | py | 3,193 | python | es | code | 0 | github-code | 50 |
5627607612 | import time
from skimage import measure
from SciProjects.imaging import pull_data, preprocess
from SciProjects.imaging.scrape_info import get_mapping
from SciProjects.imaging.algorithm import *
root = "/home/csa/tmp/PIC/"
oldpics = "/home/csa/Dohany_kepanalizis/"
source = "/home/csa/tmp/jpegs/"
annotpath = "/home/csa/tmp/annotated/"
SCALE = 0.0085812242798
MINAREA = 10000
MINHEIGHT = 1000
def run(randomize=False, verbose=1, **kw):
outchain = "PIC\tSMPL\tRPLC\tPRP\tFITPOLY\tFITMXWD\tFITAREA\n"
pics = pull_data(source=source, randomize=randomize, verbose=verbose)
mapping = get_mapping(root)
saveroot = kw.get("savepath", "")
for i, (pic, path) in enumerate(pics, start=1):
assert len(path) == 2, "Died: path: {}".format(path)
smplnm, prll = mapping[path[1]]
lpic = preprocess(pic, dist=False, pictitle=path[1])
kw["lpic"] = lpic
# kw["savepath"] = annotpath + path[-1]
kw["labeltup"] = pic, path
if "savepath" in kw:
kw["savepath"] = saveroot + "annot_" + path[1] + ".png"
prps = sorted([prp for prp in measure.regionprops(lpic) if prp.area > MINAREA
and prp.image.shape[0] > MINHEIGHT], key=lambda p: p.bbox[0])
results = np.array([algo(prps, **kw) for algo in algorithms])
assert results.ndim == 2
for j, res in enumerate(results.T, start=1):
outchain += "\t".join((path[1], smplnm + "_", prll, str(i))) + "\t"
outchain += "\t".join(res.astype(str)).replace(".", ",")
outchain += "\n"
with open("log.txt", "w") as handle:
# handle.write("\n{}\n".format(datetime.datetime.now().strftime("-- %Y.%m.%d_%H.%M.%S")))
handle.write(outchain)
if __name__ == '__main__':
start = time.time()
run(show=False, deg=5, SCALE=SCALE, randomize=True, savepath="/home/csa/tmp/annotated/")
print("Run took {:.3f} seconds!".format(time.time()-start))
| csxeba/SciProjects | imaging/xperiment.py | xperiment.py | py | 1,963 | python | en | code | 1 | github-code | 50 |
6060998896 |
from flask import Flask
import requests
import json
import os
app = Flask(__name__)
#endpoint that tries to purchase a book by its ID if it is available in stock
@app.route("/purchase/<item_number>", methods=['GET'])
def purchaseCatServer(item_number):
# check quantity in stock
url = os.environ['CATALOG']+"/info/"+item_number
msg =requests.get(url)
if msg.content.decode() == "Item not found :(":
return msg.content
quantity = int(msg.json()['quantity'])
if quantity > 0:
#if available in stock, update the quantity from the catalog server
url = os.environ['CATALOG']+"/update/"+item_number
#url2=os.environ['CATALOG2']+"/update/"+item_number
bookName =requests.get(url).content
s = "Purchase complete" + bookName.decode('UTF-8')
return json.dumps(s), 200, {'ContentType': 'application/json'}
else:
#if not in stock, return failure message
return json.dumps("Purchase failed out of stock"), 400, {'ContentType': 'application/json'}
#if __name__== "__main__":
# app.run(host="0.0.0.0") | Ahmad-Qerem/DOS_PROJECT | Order/app.py | app.py | py | 1,112 | python | en | code | 0 | github-code | 50 |
29645019753 | import aiohttp
import asyncio
import discord
import re
import sqlite3
import threading
from bs4 import BeautifulSoup
from datetime import datetime
from discord.ext import commands
from ext.utils import utils, checks
from settings import *
class APBDB2:
def __init__(self, bot):
self.bot = bot
self.connection = sqlite3.connect(DATABASE)
self.c = self.connection.cursor()
self.news_feed_lock = asyncio.Lock()
self.news_feed = bot.loop.create_task(self.news_feed())
def __del__(self):
self.news_feed.cancel()
API_URL = 'https://db.apbvault.net/beacon/'
@commands.command(name='db')
async def db(self, ctx, *, query: str):
await ctx.trigger_typing()
detail = False
if '-detail' in query:
detail = True
query = query.replace('-detail', '')
search = await self.item_search(query)
if search[0] is True:
item = await utils.api_request(self.API_URL + 'items/{}'.format(search[1]['sapbdb']))
e = discord.Embed(title=str(item['item_detail']['sdisplayname']),
description=str(item['item_detail']['sdescription']),
url=str(item['url']),
color=0xFF0000)
if item['Category'] == 'Modifications':
for effect in item['ModifierEffects']:
value = '**Multiplier:** {} \n**Add:** {}'.format(str(effect['feffectmultiplier']), str(effect['faddtoresult']))
e.add_field(name=await self.rem_color_code(str(effect['sdescription'])), value=value)
elif item['Category'] == 'Vehicles':
e.add_field(name='Max Health', value='{} '.format(item['VehicleSetupType']['nmaxhealth']))
e.add_field(name='Max Speed', value='{} m/s'.format(item['VehicleSetupType']['fmaxspeed']))
e.add_field(name='Max Reverse Speed', value='{} m/s'.format(item['VehicleSetupType']['fmaxreversespeed']))
e.add_field(name='Max Explosion Damage', value='{} at {} cm'.format(item['Explosions']['ndamage'], round(item['Explosions']['fgroundzeroradius'], 0)))
e.add_field(name='Cargo Capacity', value='{}'.format(item['VehicleSetupType']['nmaincargopipcapacity']))
e.add_field(name='Drive Type', value=await self.drive_type(item['VehicleSetupType']['edrivetype']))
elif item['Category'] == 'Weapons':
try:
e.add_field(name='TTK / STK', value='{} sec / {}'.format(round(item['calculated']['timetokill_effect'], 2), item['calculated']['shottokill_effect']))
e.add_field(name='TTS / STS', value='{} sec / {}'.format(round(item['calculated']['timetostun_effect'], 2), item['calculated']['shottostun_effect']))
except:
e.add_field(name='TTK / STK', value='{} sec / {}'.format(round(item['calculated']['timetokill'], 2), item['calculated']['shottokill']))
e.add_field(name='TTS / STS', value='{} sec / {}'.format(round(item['calculated']['timetostun'], 2), item['calculated']['shottostun']))
if detail is True:
e.add_field(
name='DMG / STM / HRD',
value='{} / {} / {}'.format(
item['WeaponType']['fhealthdamage'],
item['WeaponType']['fstaminadamage'],
round((item['WeaponType']['fharddamagemodifier'] * item['WeaponType']['fhealthdamage']), 2)
)
)
e.add_field(
name='MAG / POOL',
value='{} / {}'.format(
item['WeaponType']['nmagazinecapacity'],
item['WeaponType']['nammopoolcapacity']
),
)
e.set_thumbnail(url=str(item['icon_url']))
await ctx.send(embed=e)
else:
await ctx.send(embed=discord.Embed(
title='Error',
description='Item not found. Your query: "{}"'.format(query),
color=0xFF0000
))
@commands.group(name='apb')
async def apb(self, ctx):
if ctx.invoked_subcommand is None:
pass
@apb.group(name='feed', no_pm=True)
@checks.can_manage()
async def apb_feed(self, ctx):
if ctx.invoked_subcommand is None:
pass
@apb_feed.command(name='mod', no_pm=True)
async def feed_mod(self, ctx, *, mods: bool):
try:
if mods is True:
self.c.execute('UPDATE apb_news_feed SET ShowMods = 1 WHERE ID = ?', [ctx.message.guild.id])
desc = 'Enabled moderators in news feed.'
else:
self.c.execute('UPDATE apb_news_feed SET ShowMods = 0 WHERE ID = ?', [ctx.message.guild.id])
desc = 'Disabled moderators in news feed.'
except Exception as e:
await ctx.send(embed=discord.Embed(title='Error', description=str(e), color=0xFF0000))
else:
self.connection.commit()
await self.apb_e(ctx, 'News Feed', description=desc)
@apb_feed.command(name='set', no_pm=True)
async def feed_set(self, ctx, *, post: int):
try:
self.c.execute('UPDATE apb_news_feed SET PostID = ? WHERE ID = ?', (post, ctx.message.guild.id))
except Exception as e:
await ctx.send(embed=discord.Embed(title='Error', description=str(e), color=0xFF0000))
else:
self.connection.commit()
await self.apb_e(ctx, 'News Feed', description='ID set to {}.'.format(post))
@apb_feed.group(name='channel', no_pm=True)
async def feed_channel(self, ctx):
if ctx.invoked_subcommand is None:
pass
@feed_channel.command(name='remove', no_pm=True)
async def channel_remove(self, ctx):
try:
self.c.execute('DELETE FROM apb_news_feed WHERE ID = ?', [ctx.message.guild.id])
except Exception as e:
await ctx.send(embed=discord.Embed(title='Error', description=str(e), color=0xFF0000))
else:
self.connection.commit()
await self.apb_e(ctx, 'News Feed', 'Channel removed from this server.')
@feed_channel.command(name='set', no_pm=True)
async def channel_set(self, ctx):
try:
id = await utils.api_request(self.API_URL + 'tracker?limit=1')
self.c.execute('INSERT INTO apb_news_feed VALUES (?, ?, ?, ?)',
(ctx.message.guild.id, ctx.message.channel.id, id[0]['p_id'], 1))
except sqlite3.IntegrityError as e:
self.c.execute('UPDATE apb_news_feed SET ChannelID = ? WHERE ID = ?', (ctx.message.channel.id, ctx.message.guild.id))
self.connection.commit()
await self.apb_e(ctx, 'News Feed', 'APB news Feed channel updated.')
except Exception as e:
await ctx.send(embed=discord.Embed(title='Error', description=str(e), color=0xFF0000))
else:
self.connection.commit()
await self.apb_e(ctx, 'News Feed', 'Channel set.')
@commands.command(name='role', no_pm=True)
async def role(self, ctx, role: str):
roles = ['Citadel', 'Jericho', 'Han', 'Nekrova']
try:
if role.lower() == 'citadel':
role = discord.utils.get(ctx.message.guild.roles, name='Citadel')
await self.manage_role(ctx, role)
elif role.lower() == 'jericho':
role = discord.utils.get(ctx.message.guild.roles, name='Jericho')
await self.manage_role(ctx, role)
elif role.lower() == 'nekrova':
role = discord.utils.get(ctx.message.guild.roles, name='Nekrova')
await self.manage_role(ctx, role)
elif role.lower() == 'han':
role = discord.utils.get(ctx.message.guild.roles, name='Han')
await self.manage_role(ctx, role)
except Exception as e:
await ctx.send('Error: {}'.format(e))
async def manage_role(self, ctx, role):
if role:
if any(role == rol for rol in ctx.message.author.roles):
await ctx.message.author.remove_roles(role)
await ctx.send('Removed {} role from {}.'.format(role, ctx.message.author))
else:
await ctx.message.author.add_roles(role)
await ctx.send('Added {} role to {}.'.format(role, ctx.message.author))
else:
await ctx.send('Error: {} role not found.'.format(role))
async def news_feed(self):
print('[bg] APB News Feed active')
while not self.bot.is_closed():
self.c.execute('SELECT ID, ChannelID, PostID, ShowMods FROM apb_news_feed')
servers = self.c.fetchall()
print("[DEBUG][nf] Checking {0} servers...".format(len(servers)))
for server in servers:
guild = self.bot.get_guild(int(server[0]))
if guild is None:
print("[DEBUG][nf] Could not get guild with ID {}".format(server[0]))
continue
                if int(server[1]) != 0:
print("[DEBUG] Getting channel {0} for guild {1}".format(server[1], guild))
try:
channel = guild.get_channel(int(server[1]))
except discord.NotFound:
continue
print("[DEBUG] Guild {0} using channel {1}".format(guild, channel))
if channel is None:
print("[DEBUG][nf] Could not get channel for guild {}".format(guild))
continue
else:
continue
print("[DEBUG][nf] [{0}] Channel found: {1}".format(guild, channel))
postID = server[2]
mods = server[3]
                if mods == 0:
                    request_url = self.API_URL + 'tracker?mod=False&currentid={}'.format(postID)
                else:
                    request_url = self.API_URL + 'tracker?currentid={}'.format(postID)
print("[DEBUG][nf] [{0}] Attempting API request: {1}".format(guild, request_url))
                posts = await utils.api_request(request_url)
                # api_request returns an int HTTP status instead of JSON on failure;
                # the original `posts > 200 and posts < 200` check could never be True
                if isinstance(posts, int) and posts != 200:
                    print("[DEBUG][nf] Error: Request did not return JSON - Status: {}".format(posts))
                    continue
print('[DEBUG][nf] [{0}] Processing {1} new posts in {2}.'.format(guild, len(posts), channel))
if len(posts) > 0 and not isinstance(posts, int):
for post in reversed(posts):
# separate the quote message and the admin message with BeautifulSoup
soup = BeautifulSoup(post['p_description'], 'html.parser')
desc_quote = soup.find('blockquote')
if desc_quote is not None:
desc_quote = soup.find('blockquote').get_text()
for tag in soup.find_all('blockquote'):
tag.replaceWith('')
desc_admin = soup.find('div').get_text()
else:
desc_admin = soup.find('div').get_text()
# Remove HTMl and non-ASCII symbols
desc_admin = await self.rem_color_code(desc_admin)
desc_admin = (''.join([i if ord(i) < 128 else '' for i in desc_admin]))
# combine final description (only admin rn)
desc = desc_admin
# Check if message is too long > add "Read more..." with link if too long
if len(desc) >= 1000:
desc = desc[:1000]
desc += '\n [Read more...]({})'.format(post['p_link'])
e = discord.Embed(
title=post['p_thread'],
description=desc,
url=post['p_thread_link'],
color=0xFF0000,
timestamp=datetime.strptime(post['p_pubdate'], "%Y-%m-%dT%H:%M:%S+00:00"
))
e.set_author(
name=post['author']['p_author'],
url=post['p_author_link'],
icon_url=post['author']['p_author_img']
)
try:
await channel.send(embed=e)
except discord.Forbidden:
print("[DEBUG][nf] [{0}] No permissions in {1}".format(guild, channel))
continue
except discord.HTTPException as e:
print("[DEBUG][nf]HTTPError in {0} - {1}".format(guild, str(e)))
continue
else:
await asyncio.sleep(1)
self.c.execute('UPDATE apb_news_feed SET PostID = ? WHERE ID = ?', (post['p_id'], server[0]))
self.connection.commit()
print('[DEBUG] APB NEWS FEED : EVENT : LOOP COMPLETED (90 s)')
await asyncio.sleep(90)
async def item_search(self, query):
query_converted = query.replace(' ', '%20')
async with aiohttp.ClientSession() as session:
async with session.get(self.API_URL + 'search/?q={}'.format(query_converted)) as resp:
if resp.status == 200:
item = await resp.json()
return True, item[0]
else:
return False, 'spacer'
async def drive_type(self, drivetype):
        if drivetype == 0:
            return 'RWD'
        elif drivetype == 1:
            return 'FWD'
        elif drivetype == 2:
            return 'AWD'
        return 'Unknown'
async def apb_e(self, ctx, title, description=None):
post = discord.Embed(title=title, description=description, color=0xFF0000)
post.set_author(name='APB Extension')
        await ctx.send(embed=post)
    async def rem_color_code(self, text):
        # Despite its name, this strips any HTML-style <...> tag from the text
        TAG_RE = re.compile(r'<[^>]+>')
        return TAG_RE.sub('', text)
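    # Illustrative note (added): rem_color_code is a coroutine, so e.g.
    #   await self.rem_color_code('<span color="red">hi</span> there')  ->  'hi there'
    # every <...> tag is removed while the enclosed text is kept.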
def setup(bot):
bot.add_cog(APBDB2(bot))
| SKaydev/apbdb-discord | ext/apbdb2.py | apbdb2.py | py | 14,874 | python | en | code | 0 | github-code | 50 |
26661597316 | """
Main tests
"""
import unittest
from db.sql.connection.singleton import Database
from main import build_db, upload_csv
class MainTests(unittest.TestCase):
"""
Main Unit Tests
"""
def create_test_db(self):
"""
        Set up the test database in RAM
"""
headers = {
"product_id": "varchar",
"quantity": "integer",
"wholesale_price": "double precision",
"sale_price": "double precision",
"supplier_id": "varchar"
}
build_db(":memory:", "products", headers)
def test_build_db(self):
"""
Test build db in main
"""
self.create_test_db()
db = Database.instance(None)
c = db.cursor()
try:
c.execute("SELECT * FROM products")
finally:
c.close()
def test_upload_csv(self):
"""
Test uploading the CSV file
"""
self.create_test_db()
headers = {
"product_id": "varchar",
"quantity": "integer",
"wholesale_price": "double precision",
"sale_price": "double precision",
"supplier_id": "varchar"
}
upload_csv(headers.keys(), True)
db = Database.instance(None)
c = db.cursor()
try:
rows = []
for row in c.execute("SELECT * FROM products"):
rows.append(row)
assert len(rows) > 10000
finally:
c.close()
| ThePinkPythons/Database-Project | tests/test.py | test.py | py | 1,517 | python | en | code | 0 | github-code | 50 |
26779225638 | def cm2inch(*tupl):
    """Convert centimetre dimensions to inches.

    Accepts either a single tuple, cm2inch((12, 6)), or separate values,
    cm2inch(12, 6), and returns the converted tuple."""
    inch = 2.54
    if isinstance(tupl[0], tuple):
        return tuple(i/inch for i in tupl[0])
    else:
        return tuple(i/inch for i in tupl)
# figure size
SMALL_FIGURE_SIZE=cm2inch(6,4.5) #(8,6)
BIG_FIGURE_SIZE=cm2inch(12,6)
# content size
LINE_WIDTH=1
MARKER_SIZE=7
MARKEREDGE_WIDTH=1
LEGEND_SIZE=7
# label text size
TITLE_SIZE=10
XY_LABEL_SIZE=8
TICK_LABEL_SIZE=6
# spines width
XY_SPINES_WIDTH=1
# grid
GRID_WIDTH=1
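# Illustrative usage sketch (added; not part of the original module). These
# constants are intended for matplotlib calls, e.g.:
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots(figsize=SMALL_FIGURE_SIZE)
#   ax.plot(xs, ys, linewidth=LINE_WIDTH, markersize=MARKER_SIZE)
#   ax.set_title('title', fontsize=TITLE_SIZE)
#   ax.tick_params(labelsize=TICK_LABEL_SIZE)
#   ax.grid(linewidth=GRID_WIDTH)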
| Si3ver/SVNF | src/figure_style.py | figure_style.py | py | 472 | python | en | code | 24 | github-code | 50 |
13034541438 | #Chris Kopacz
#Python Exercises from Github
#Level 1, question 1
#created: 20 June 2017
"""
Question 1
Level 1
Question:
Write a program which will find all such numbers which are divisible by 7 but are not a
multiple of 5, between 2000 and 3200 (both included).
The numbers obtained should be printed in a comma-separated sequence on a single line.
"""
aList = []
lower = 2000  # renamed from min/max/iter to avoid shadowing Python built-ins
upper = 3200
for num in range(lower, upper + 1):
    if num % 7 == 0 and num % 5 != 0:
        aList.append(str(num))
print(','.join(aList))
| chriskopacz/python_practice | Problems/lev1/lev1_q1.py | lev1_q1.py | py | 552 | python | en | code | 0 | github-code | 50 |
14421032000 | from utils import load_data
###
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
import numpy as np
from sklearn.preprocessing import Imputer, normalize
from sklearn.metrics import normalized_mutual_info_score
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import make_scorer
import matplotlib.pyplot as plt
###
###
def preprocess_data(data):
# handle missing values;
# scale the data
imp = Imputer()
    data = imp.fit_transform(data)  # fit and apply the imputer in one step
data = normalize(data)
return data
###
###
def clean_labels(labels):
# 2 -> 0; 4 -> 1
temp = []
for l in labels:
if l == 2:
temp.append(0)
else:
temp.append(1)
return np.array(temp)
###
def report_nmi(data, target, model_list):
    """Fit each model on `data` and return the NMI of its labels against `target`."""
    nmi_list = []
for model in model_list:
y_pred = model.fit_predict(data)
nmi_list.append(normalized_mutual_info_score(y_pred, target))
return nmi_list
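# Illustrative note (added): report_nmi returns one NMI score per model, in the
# same order as model_list; each score lies in [0, 1], where 1 means the
# predicted cluster labels match the ground truth perfectly.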
###
if __name__ == '__main__':
    kmeans = KMeans(2)
    ###
all_data = load_data('breast-cancer-wisconsin.data')
data = all_data[:, 1:-1] # discarding the id
target = all_data[:, -1]
target = clean_labels(target)
data = preprocess_data(data)
_, X_val, _, y_val = train_test_split(data, target, test_size=0.2)
agglo = AgglomerativeClustering(2)
agglo_cosine = AgglomerativeClustering(2, affinity='cosine', linkage='average')
agglo_cosine_2 = AgglomerativeClustering(2, affinity='cosine', linkage='complete')
dbscan_bad = DBSCAN(eps=0.5)
dbscan_good = DBSCAN(eps=0.1)
model_list = [kmeans, agglo, agglo_cosine, agglo_cosine_2, dbscan_bad, dbscan_good]
nmi_list = report_nmi(data, target, model_list)
plt.plot(nmi_list)
print(nmi_list)
params = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
best = 0
best_eps = 0.01
for p in params:
model_list = [DBSCAN(eps=p)]
dbscan_nmi_list = report_nmi(X_val, y_val, model_list)
print(dbscan_nmi_list[0])
if dbscan_nmi_list[0] > best:
best = dbscan_nmi_list[0]
best_eps = p
y_pred_best = DBSCAN(eps=best_eps).fit_predict(data)
print(normalized_mutual_info_score(y_pred_best, target))
plt.show()
###
| Pibborn/mockup-exam | ex1_clustering.py | ex1_clustering.py | py | 2,383 | python | en | code | 0 | github-code | 50 |
20564069413 | import pygame
from sglib import settings
class Button():
def __init__(self,img,but_loc,size):
self.scl_fac = settings.scale_factor()[0]
self.location = (int(but_loc[0]* self.scl_fac), int(but_loc[1]* self.scl_fac))
self.size = int(size * self.scl_fac)
self.img = pygame.image.load(img)
self.img = pygame.transform.scale(self.img,(self.size,self.size))
def draw_button(self,SCREEN):
SCREEN.blit(self.img,self.location)
    # Is the cursor overlapping the button?
    def isoverlap(self, mouse_loc):
        return (self.location[0] < mouse_loc[0] < self.location[0] + self.size
                and self.location[1] < mouse_loc[1] < self.location[1] + self.size) | StrayRaider/solargolf | sglib/buttons.py | buttons.py | py | 804 | python | en | code | 1 | github-code | 50 |
1789163298 | DEFAULT_ACE_HIGH = True
SUITS = {
'spade': { 'name': 'Spade', 'symbol': 'S', 'value': 4 },
'heart': { 'name': 'Heart', 'symbol': 'H', 'value': 3 },
'diamond': { 'name': 'Diamond', 'symbol': 'D', 'value': 2 },
'club': { 'name': 'Club', 'symbol': 'C', 'value': 1 }
}
RANKS = {
'ace': { 'name': 'Ace', 'symbol': 'A', 'value': 14 },
'king': { 'name': 'King', 'symbol': 'K', 'value': 13 },
'queen': { 'name': 'Queen', 'symbol': 'Q', 'value': 12 },
'jack': { 'name': 'Jack', 'symbol': 'J', 'value': 11 },
'ten': { 'name': 'Ten', 'symbol': 'T', 'value': 10 },
'nine': { 'name': 'Nine', 'symbol': '9', 'value': 9 },
'eight': { 'name': 'Eight', 'symbol': '8', 'value': 8 },
'seven': { 'name': 'Seven', 'symbol': '7', 'value': 7 },
'six': { 'name': 'Six', 'symbol': '6', 'value': 6 },
'five': { 'name': 'Five', 'symbol': '5', 'value': 5 },
'four': { 'name': 'Four', 'symbol': '4', 'value': 4 },
'three': { 'name': 'Three', 'symbol': '3', 'value': 3 },
'two': { 'name': 'Two', 'symbol': '2', 'value': 2 },
}
class Card:
# TODO: ace_high will always be true for now. Need to figure out how to handle when it is false
# and if keeping track of that here is the best place to do so
def __init__(self, suit, rank, ace_high=DEFAULT_ACE_HIGH):
"""
Initializes a card, setting a suit and rank.
:param suit: suit of the card
:type suit: string
:param rank: rank of the card
:type rank: string
:param ace_high: whether or not to treat ace as a high card
:type ace_high: bool
"""
self.suit = SUITS[suit]
self.rank = RANKS[rank]
self.ace_high = ace_high
def __repr__(self):
"""
        Representation of the card, of the form "<suit>, <rank>, ace_high=<bool>".
:rtype: string
"""
return "{}, {}, ace_high={}".format(self.suit['name'], self.rank['name'], self.ace_high)
def __str__(self):
"""
Pretty string representation of the card.
:return: what the printing the card should show
:rtype: string
"""
return "{} of {}s".format(self.rank['name'], self.suit['name'])
def __lt__(self, comp_card):
"""
        Returns true if self is less than <comp_card>. First we compare by suit:
        Spade > Heart > Diamond > Club. If suits are equal, we compare by rank:
        Ace > King > Queen > Jack > Ten > ... > Deuce. Aces can in principle be
        high or low; self.ace_high records this, but the comparison currently
        assumes ace-high (see the TODO above).
        :param comp_card: card to compare self to
        :type comp_card: Card
        :return: True if self less than <comp_card>, otherwise False.
:rtype: bool
"""
if self.suit['value'] == comp_card.suit['value']:
return self.rank['value'] < comp_card.rank['value']
else:
return self.suit['value'] < comp_card.suit['value']
def __eq__(self, comp_card):
"""
Returns true if self is equal to <comp_card>. This means that both suit and rank must match.
:param comp_card: card to compare self to
:type comp_card: Card
:return: True if self equal to <comp_card>, otherwise False.
:rtype: bool
"""
return (self.suit['value'] == comp_card.suit['value'] and
self.rank['value'] == comp_card.rank['value'])
def __gt__(self, comp_card):
"""
        Returns true if self is greater than <comp_card>. First we compare by suit:
        Spade > Heart > Diamond > Club. If suits are equal, we compare by rank:
        Ace > King > Queen > Jack > Ten > ... > Deuce. Aces can in principle be
        high or low; self.ace_high records this, but the comparison currently
        assumes ace-high (see the TODO above).
        :param comp_card: card to compare self to
        :type comp_card: Card
        :return: True if self greater than <comp_card>, otherwise False.
:rtype: bool
"""
if self.suit['value'] == comp_card.suit['value']:
return self.rank['value'] > comp_card.rank['value']
else:
return self.suit['value'] > comp_card.suit['value']
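
# Minimal usage sketch (added; not part of the original module):
if __name__ == '__main__':
    ace_of_spades = Card('spade', 'ace')
    king_of_hearts = Card('heart', 'king')
    print(ace_of_spades > king_of_hearts)               # True: spades outrank hearts
    print(Card('club', 'two') < Card('club', 'three'))  # True: same suit, so rank decides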
| foole/pokersim | pokersim/deck/card.py | card.py | py | 4,661 | python | en | code | 1 | github-code | 50 |
14838014395 | voucher_price = int(input())
purchase = input()
tickets = 0
other = 0
price = 0
while purchase != "End":
    letter_1 = purchase[0]
    letter_2 = purchase[1]
    # names longer than 8 characters are tickets and cost ord(c1) + ord(c2);
    # everything else costs ord(c1)
    if len(purchase) > 8:
        price = ord(letter_1) + ord(letter_2)
    else:
        price = ord(letter_1)
    voucher_price -= price
    if voucher_price >= 0:
        if len(purchase) > 8:
            tickets += 1
        else:  # the loop guard already rules out "End" here
            other += 1
    else:
        break
    purchase = input()
print(tickets)
print(other)
| Pavlina-G/Softuni-Programming-Basics | 07. PB Exams/2019/04_2_cinema_voucher.py | 04_2_cinema_voucher.py | py | 558 | python | en | code | 0 | github-code | 50 |
43192459738 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA, headB):
if headA==None or headB==None:
return None
cur1=headA
cur2=headB
count1=count2=0
while cur1!=None:
count1+=1
cur1=cur1.next
while cur2!=None:
count2+=1
cur2=cur2.next
cur1=headA
cur2=headB
if count1>count2:
i=0
while cur1!=None and i<count1-count2:
cur1=cur1.next
i+=1
else:
i=0
while cur2!=None and i<count2-count1:
cur2=cur2.next
i+=1
while cur1!=None and cur2!=None and cur1!=cur2:
cur1=cur1.next
cur2=cur2.next
return cur1
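
# Note (added): the approach above runs in O(m + n) time with O(1) extra space.
# A classic equivalent alternative is the two-pointer switch, sketched here as
# comments:
#   a, b = headA, headB
#   while a is not b:
#       a = a.next if a else headB
#       b = b.next if b else headA
#   return a  # the intersection node, or None if the lists never meet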
| phoanghuong86/leetcode-daily-challenges | 06Jun-160. Intersection of Two Linked Lists.py | 06Jun-160. Intersection of Two Linked Lists.py | py | 919 | python | en | code | 0 | github-code | 50 |
8026431330 | from PIL import Image
import pytesseract as tess
import pyautogui
import time
import re
import random
tess.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
pyautogui.click(1670, 10, interval=0.4)
# start_time = time.time()
it = 0
while True:
# duration = (time.time() - start_time)
it +=1
#! for normal test
pyautogui.screenshot('image.png', region=(300, 330, 1200, 80))
#! for competition
# pyautogui.screenshot('image.png', region=(320, 285, 1060, 60))
img = Image.open('image.png')
text = tess.image_to_string(img)
    words = re.findall(r'\w+', text)  # renamed from `list` to avoid shadowing the built-in
    result = len(words)
    if len(text) < 40:
        exit()
    #! Error fallback: on the 3rd iteration, type random words from the capture
    if it == 3:
        for i in range(0, result):
            word = random.choice(words)
pyautogui.write(word, 0.07)
pyautogui.press('space')
text = ''
#! for normal typing
pyautogui.screenshot('image.png', region=(300, 330, 1200, 80))
#! for competition
# pyautogui.screenshot('image.png', region=(320, 285, 1060, 60))
    #!Writer. typing speed (0.06 == 165 and 0.045 == 210, 0.06 - 0.07 is recommended)
img = Image.open('image.png')
text = tess.image_to_string(img)
pyautogui.write(text, 0.09)
pyautogui.press('space')
| HamidTheDev/Typing-Master | main.py | main.py | py | 1,285 | python | en | code | 1 | github-code | 50 |
33868419356 | # -*- coding: utf-8 -*-
import fastText
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
import stop_words
import pathlib
def tokenize(text):
return word_tokenize(text, language='french')
class FeaturesExtractor:
""" Handle features extractions based on word embeddings (fasttext) """
def __init__(self,
model_path: str = 'data/cc.fr.300.bin'):
assert model_path.endswith('.bin'), 'model_path should be a .bin file'
assert pathlib.Path(model_path).exists(), 'model_path does not exists'
self.stop_words = set(stopwords.words('french') +
list(string.punctuation) +
stop_words.get_stop_words('fr'))
print(('loading model could take a while...'
' and takes up to 7GO of RAM'))
self.model = fastText.load_model(model_path)
def get_features(self, response: str):
"""
"""
assert type(response) == str, 'response must be a string'
words = tokenize(response)
words = [x for x in words if x not in self.stop_words]
return self.model.get_sentence_vector(' '.join(words))
# if __name__ == "__main__":
# s = FeaturesExtractor()
# features = [s.get_features(x) for x in responses]
# from sklearn.cluster import KMeans
# k = KMeans(n_clusters=15)
# k.fit(np.array(features))
# df = pd.DataFrame({'label': k.labels_, 'response': responses})
# for label in df.label.unique():
# print('label {}'.format(label))
# samples = [x for x in df[df.label==label].sample(10).response.tolist()]
# for sample in samples:
# print(sample)
# print('#'*20)
| raphaelreme/SD | SD210/granddebats/src/kmeans_embeddings.py | kmeans_embeddings.py | py | 1,807 | python | en | code | 0 | github-code | 50 |
14606903043 | from random import gauss, random
import scipy as sp
import numpy as np
from scipy.fft import fft
import math
class HurstModel(object):
__instance = None
def __init__(self):
self.filename = ''
self.time_series = []
self.hurst_list = []
self.dim_list = []
if HurstModel.__instance:
self.__instance = self.get_instance()
    # Method that implements the Singleton pattern
@classmethod
def get_instance(cls):
if not cls.__instance:
cls.__instance = HurstModel()
return cls.__instance
    # Load the source time series from a file
def load_data(self, filename):
self.filename = filename
        path = f'Кардиограммы\\{filename}'  # Path to the file, relative to the directory containing main.py
        with open(path, 'r') as file:  # Read the file
self.time_series = list(map(float, file.readlines()))
def send_value(self):
pass
    # Compute the Hurst exponent for a prefix of the time series
@staticmethod
def hurst(time_series, timepoint: int, coeff: float):
        n = len(time_series[:timepoint])  # Number of elements
        mean_value = sum(time_series[:timepoint]) / n  # Mean
        deviation_list = [x - mean_value for x in time_series[:timepoint]]  # Deviations from the mean
        amplitude = max(deviation_list) - min(deviation_list)  # Range of the deviations
        std = (sum([(x - mean_value) ** 2 for x in time_series[:timepoint]]) / n) ** 0.5  # Standard deviation
        return math.log(amplitude / std) / math.log(coeff * n)  # Hurst exponent via the rescaled range
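
    # Illustrative note (added): this is the simplified rescaled-range estimate
    # H = log(R/S) / log(coeff * n), where R is the range of deviations from the
    # mean and S is the standard deviation. With coeff = 0.5 the denominator is
    # log(n/2). H > 0.5 suggests a persistent (trending) series, H < 0.5 an
    # anti-persistent one, and the code below uses the fractal dimension D = 2 - H.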
    # Build the lists of Hurst exponents and fractal dimensions for every prefix of the series (from length 4 onward)
def hurst_calculate(self):
self.hurst_list = []
self.dim_list = []
for i in range(4, len(self.time_series) + 1):
self.hurst_list.append(HurstModel.hurst(time_series=self.time_series, timepoint=i, coeff=0.5))
self.dim_list.append(2 - self.hurst_list[-1])
self.send_value()
def generate_timeserires(self):
sigma = 1
H = 0.5
N = np.power(2, 10)
Fourier_seq = [gauss(0, sigma)] + [
gauss(0, 1) * np.exp(2 * np.pi * complex(0, 1) * random()) / (np.power(i, H + 0.5)) for i in
range(1, int(N / 2))] + [
gauss(0, sigma) * (2 * np.pi * complex(0, 1) * random()).real / (np.power(N / 2, H + 0.5))]
Fourier_seq += [Fourier_seq[N - i].conjugate() for i in range(int(N / 2) + 1, N)]
self.time_series = list(map(float, sp.fft.ifft(Fourier_seq)))
| Krukrukruzhka/Fractal_dimension | model.py | model.py | py | 3,010 | python | ru | code | 0 | github-code | 50 |
42822512069 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `icenlp_bridge` package."""
import os
import unittest
from icenlp_bridge import init, parse
_SKIP = os.environ.get('ICENLP_DISABLE_TEST') == 'true'
class TestIcenlp_bridge(unittest.TestCase):
"""Tests for `icenlp_bridge` package."""
def test_failed_init(self):
with self.assertRaises(Exception):
init('localhost', 4321)
@unittest.skipIf(_SKIP, 'IceNLP server not running')
def test_init(self):
init('localhost', 1234)
@unittest.skipIf(_SKIP, 'IceNLP server not running')
def test_parse(self):
init('localhost', 1234)
result = parse('Áframhaldandi úrkoma í dag')
self.assertIn('Áframhaldandi', result)
| sverrirab/icenlp_bridge | tests/test_icenlp_bridge.py | test_icenlp_bridge.py | py | 749 | python | en | code | 3 | github-code | 50 |
11954296623 | '''
https://www.acmicpc.net/problem/11724
'''
import sys
sys.setrecursionlimit(10 ** 6)  # the recursive DFS can exceed CPython's default limit on large graphs
n, m = map(int, input().split())
data = [[] for _ in range(n+1)]
for i in range(m):
a, b = map(int, input().split())
data[a].append(b)
data[b].append(a)
visited = [False] * (n+1)
def dfs(start):
visited[start] = True
# if not data[start]:
# return True
for next in data[start]:
if not visited[next]:
dfs(next)
count = 0
for i in range(1,n+1):
if not visited[i]:
dfs(i)
count += 1
print(count) | gogongkong/Python_Study | Month_06/Wk26/0630/백준11724번_DFS_연결요소의갯수.py | 백준11724번_DFS_연결요소의갯수.py | py | 560 | python | en | code | 2 | github-code | 50 |
10495689933 | import pandas as pd
# The absolute path of the csv file.
PATH = r"C:/Users/hzb/Desktop/毕业设计/热塑性材料信息.xlsx"
if __name__ == "__main__":
df = pd.read_excel(PATH, engine='openpyxl')
# 1. Remove all empty columns.
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
# 2. Fill empty values in TEXT fields.
    # Note: fillna(..., inplace=True) on an .iloc slice works on a copy and is
    # silently lost; assign the filled slice back instead.
    df.iloc[:, :13] = df.iloc[:, :13].fillna("未获得")
    # 3. Convert numerical fields to float and fill empty values as NaN.
for num_field_idx in range(13, 59):
# Use type transform and catch the errors with blank values.
df.iloc[:, num_field_idx] = pd.to_numeric(df.iloc[:, num_field_idx], errors="coerce")
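    # Illustrative note (added): errors="coerce" turns unparsable entries into
    # NaN, e.g. pd.to_numeric(pd.Series(["1.5", "n/a"]), errors="coerce")
    # yields [1.5, NaN].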
df.insert(0, "id", range(0, len(df)))
    # 4. Remove duplicated field values while preserving the columns.
    # Series.replace(..., inplace=True) returns None, so the original assignments
    # wiped the whole column; instead blank out only the duplicated cells.
    df['链接'] = df['链接'].mask(df['链接'] == df['制造商'])
    df['测试日期'] = df['测试日期'].mask(df['测试日期'] == df['上次修改日期'])
# 5. Detect data exceptions.
mis_arr = [False] * 20 + list(df[20:].热膨胀数据横向各向同性系数alpha2.isna())
df["列错乱"] = mis_arr
df.to_csv(r"D:/test.csv", index=None)
| EMUNES/hust-mdb | data-process/csv_data_clean.py | csv_data_clean.py | py | 1,209 | python | en | code | 2 | github-code | 50 |
14257012019 | import collections
import copy
def solution(n, wires):
result = [[] for _ in range(n-1)]
for ind, wire in enumerate(wires) :
graph = collections.defaultdict(list)
wires_ = copy.deepcopy(wires)
wires_.remove(wire)
for wire_ in wires_ :
graph[wire_[0]].append(wire_[1])
graph[wire_[1]].append(wire_[0])
def dfs(key, visited) :
for value in graph[key] :
if value not in visited :
visited.append(value)
dfs(value, visited)
# print(graph)
visited = []
for key in graph.keys() :
if key not in visited :
visited.append(key)
dfs(key, visited)
result[ind].append(len(visited))
# print(result)
smallest = 9999
for r in result :
if len(r) == 1 :
residual = n - r[0]
if (r[0] - residual) < smallest :
smallest = (r[0] - residual)
else :
if abs((max(r) - min(r)) - min(r)) < smallest :
smallest = abs((max(r) - min(r)) - min(r))
return smallest | miiiingi/algorithmstudy | bruteforce/6.py | 6.py | py | 1,174 | python | en | code | 0 | github-code | 50 |
23471789937 | #!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_app_platform/license/LICENSE
#
##############################################################################
# Imports
##############################################################################
# enable some python3 compatibility options:
# (unicode_literals not compatible with python2 uuid module)
from __future__ import absolute_import, print_function
from nose.tools import assert_equal, assert_false, assert_true
import os
import shutil
import tempfile
import rocon_console.console as console
from rocon_app_utilities.rapp_repositories import build_index, get_index, get_index_dest_prefix_for_base_paths, has_index, load_index, load_uris, save_uris
##############################################################################
# Tests
##############################################################################
pwd = os.getcwd()
def test_rapp_repositories():
tempdir = tempfile.mkdtemp(suffix='', prefix='test_rapp_repositories_')
# override default location of respoitory list file and cached index archives
import rocon_app_utilities.rapp_repositories
rocon_app_utilities.rapp_repositories._rapp_repositories_list_file = os.path.join(tempdir, 'rapp.list')
uris = load_uris()
assert_equal(uris, ['ROS_PACKAGE_PATH'])
try:
save_uris([])
uris = load_uris()
#assert_equal(uris, [])
repo_path = os.path.join(os.path.dirname(__file__), 'test_rapp_repos')
index = build_index([repo_path])
assert_equal(index.raw_data.keys(), ['test_package_for_rapps/foo'])
cache_exists = has_index(repo_path)
assert_false(cache_exists)
archive_prefix = get_index_dest_prefix_for_base_paths(repo_path)
index.write_tarball(archive_prefix)
cache_exists = has_index(repo_path)
assert_true(cache_exists)
archive_path = '%s.index.tar.gz' % archive_prefix
index2 = load_index('file://%s' % archive_path)
index2 = get_index(repo_path)
assert_equal(index2.raw_data.keys(), ['test_package_for_rapps/foo'])
finally:
shutil.rmtree(tempdir)
| robotics-in-concert/rocon_app_platform | rocon_app_utilities/tests/test_rapp_repositories.py | test_rapp_repositories.py | py | 2,199 | python | en | code | 8 | github-code | 50 |
26911266975 | import os
VariantDir('build', 'src')
GTEST_HOME = '/home/shanai/oss/gtest-1.6.0'
GTEST_INCLUDE = os.path.join(GTEST_HOME, 'include')
testEnv = Environment(
ENV = os.environ,
CCFLAGS='-O0 -ggdb -Wall -I %s' % (GTEST_INCLUDE),
CFLAGS='-fprofile-arcs -ftest-coverage -std=c99',
LINKFLAGS='-fprofile-arcs -ftest-coverage',
LIBPATH=GTEST_HOME,
LIBS=['pthread', 'libgtest']
)
uninitialized01Prog = testEnv.Program(
'uninitialized01.ut',
['build/uninitialized01.c', 'build/uninitialized01_test.cc']
)
illegalAccess01Prog = testEnv.Program(
'illegalAccess01.ut',
['build/illegal_access01.c', 'build/illegal_access01_test.cc']
)
memoryLeak01Prog = testEnv.Program(
'memoryLeak01.ut',
['build/memory_leak01.c', 'build/memory_leak01_test.cc']
)
strcpy01Prog = testEnv.Program(
'strcpy01.ut',
['build/strcpy01.c', 'build/strcpy01_test.cc']
)
artifacts = [
uninitialized01Prog,
illegalAccess01Prog,
memoryLeak01Prog,
strcpy01Prog
]
testEnv.Clean(artifacts, 'build')
Default(artifacts)
| wikibook/modern-c-programming | chapter06/valgrind01/SConstruct | SConstruct | 1,054 | python | en | code | 5 | github-code | 50 | |
40161652880 | #-*- coding: utf-8 -*-
#pylint: disable-msg=W0122,R0914
"""
File : utils.py
Author : Valentin Kuznetsov <vkuznet@gmail.com>
Description: Utilities module
"""
from __future__ import print_function
# system modules
from builtins import range
import os
import re
import sys
import pwd
import pprint
import subprocess
# template tag pattern
TAG = re.compile(r'[a-zA-Z0-9]')
def template_directory():
"Return location of template directory"
mkTemplates = "src/FWCore/Skeletons/mkTemplates"
# Check developer area first
if "CMSSW_BASE" in os.environ:
ret = os.path.join(os.environ["CMSSW_BASE"], mkTemplates)
if os.path.exists(ret):
return ret
# Then release area
ret = os.path.join(os.environ["CMSSW_RELEASE_BASE"], mkTemplates)
if not os.path.exists(ret):
raise Exception("Did not find 'FWCore/Skeletons/mkTemplates' directory in the developer area nor in the release area")
return ret
def parse_word(word):
"Parse word which contas double underscore tag"
output = set()
words = word.split()
for idx in range(0, len(words)):
pat = words[idx]
if pat and len(pat) > 4 and pat[:2] == '__': # we found enclosure
tag = pat[2:pat.rfind('__')]
if tag.find('__') != -1: # another pattern
for item in tag.split('__'):
if TAG.match(item):
output.add('__%s__' % item)
else:
output.add('__%s__' % tag)
return output
def test_env(tdir, tmpl):
"""
Test user environment, look-up if user has run cmsenv, otherwise
provide meaningful error message back to the user.
"""
if not tdir or not os.path.isdir(tdir):
print("Unable to access template dir: %s" % tdir)
sys.exit(1)
if not os.listdir(tdir):
print("No template files found in template dir %s" % tdir)
sys.exit(0)
if not tmpl:
msg = "No template type is provided, "
msg += "see available templates via --templates option"
print(msg)
sys.exit(1)
def functor(code, kwds, debug=0):
"""
Auto-generate and execute function with given code and configuration
For details of compile/exec/eval see
http://lucumr.pocoo.org/2011/2/1/exec-in-python/
"""
args = []
for key, val in kwds.items():
if isinstance(val, str):
arg = '%s="%s"' % (key, val)
elif isinstance(val, list):
arg = '%s=%s' % (key, val)
else:
msg = 'Unsupported data type "%s" <%s>' % (val, type(val))
raise Exception(msg)
args.append(arg)
func = '\nimport sys'
func += '\nimport io'
func += "\ndef func(%s):\n" % ','.join(args)
func += code
func += """
def capture():
"Capture snippet printous"
old_stdout = sys.stdout
sys.stdout = io.StringIO()
func()
out = sys.stdout.getvalue()
sys.stdout = old_stdout
return out\n
capture()\n"""
if debug:
print("\n### generated code\n")
print(func)
# compile python code as exec statement
obj = compile(func, '<string>', 'exec')
# define execution namespace
namespace = {}
# execute compiled python code in given namespace
exec(obj, namespace)
# located generated function object, run it and return its results
return namespace['capture']()
def user_info(ainput=None):
"Return user name and office location, based on UNIX finger"
if ainput:
return ainput
pwdstr = pwd.getpwnam(os.getlogin())
author = pwdstr.pw_gecos
if author and isinstance(author, str):
author = author.split(',')[0]
return author
def code_generator(kwds):
"""
Code generator function, parse user arguments, load and
return appropriate template generator module.
"""
debug = kwds.get('debug', None)
if debug:
print("Configuration:")
pprint.pprint(kwds)
try:
klass = kwds.get('tmpl')
mname = 'FWCore.Skeletons.%s' % klass.lower()
module = __import__(mname, fromlist=[klass])
except ImportError as err:
klass = 'AbstractPkg'
module = __import__('FWCore.Skeletons.pkg', fromlist=[klass])
if debug:
print("%s, will use %s" % (str(err), klass))
obj = getattr(module, klass)(kwds)
return obj
def tree(idir):
"Print directory content, similar to tree UNIX command"
if idir[-1] == '/':
idir = idir[-1]
dsep = ''
fsep = ''
dtot = -1 # we'll not count initial directory
ftot = 0
for root, dirs, files in os.walk(idir):
dirs = root.split('/')
ndirs = len(dirs)
if ndirs > 1:
dsep = '| '*(ndirs-1)
print('%s%s/' % (dsep, dirs[-1]))
dtot += 1
for fname in files:
fsep = dsep + '|--'
print('%s %s' % (fsep, fname))
ftot += 1
if dtot == -1 or not dtot:
dmsg = ''
else:
dmsg = '%s directories,' % dtot
if ftot:
fmsg = '%s file' % ftot
if ftot > 1:
fmsg += 's'
else:
fmsg = ''
if dmsg and fmsg:
print("Total: %s %s" % (dmsg, fmsg))
else:
print("No directories/files in %s" % idir)
| cms-sw/cmssw | FWCore/Skeletons/python/utils.py | utils.py | py | 5,307 | python | en | code | 985 | github-code | 50 |
31214484230 | # -*- coding: utf-8 -*-
# © 2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate(use_env=False)
def migrate(cr, version):
# the table exists already, so the ORM doesn't create an id column
cr.execute(
'alter table mail_message_res_partner_needaction_rel '
'add column id serial not null primary key'
)
# as 9.0 deleted notifications for read messages, all existing
# notifications are unread by definition
cr.execute(
'update mail_message_res_partner_needaction_rel set is_read=False'
)
# set email properties
cr.execute(
"""update mail_message_res_partner_needaction_rel rel
set is_email=True, email_status=case
when m.state in ('sent', 'exception') then m.state
else 'ready'
end
from mail_mail m
where rel.mail_message_id=m.mail_message_id"""
)
openupgrade.load_data(
cr, 'mail', 'migrations/10.0.1.0/noupdate_changes.xml')
| kaerdsar/OpenUpgrade | addons/mail/migrations/10.0.1.0/post-migration.py | post-migration.py | py | 1,077 | python | en | code | null | github-code | 50 |
35980714882 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 6 11:37:14 2016
@author: jschepers
"""
import numpy as np
from PIL import Image
from psychopy import visual
import tools
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import ImageDraw
path_to_fixdur_files, path_to_fixdur_code = tools.paths()
rectXY = (1920,1080);
surf = visual.Window(size=rectXY,fullscr=False,winType = 'pyglet', screen=1, units='pix')
# set parameters for gaussian
bubble_size = 1.1774
size = round(tools.deg_2_px(bubble_size))*3
fwhm = round(tools.deg_2_px(bubble_size))
center = None
x = np.arange(0, size, 1, float)
y = x[:,np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
gaussian = np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
#gaussian = gaussian * 255
gaussian = -(gaussian*2-1)
#imgplot = plt.imshow(gaussian)
#mask_im = Image.new('RGB', (1280,960), 'red')
#mask_im = visual.Circle(surf, units='pix', radius=100)
mask_im = np.ones((1080,1920))
#mask_im = np.ones((960,1280))
#mask_im[380:560,550:730] = gaussian
mask_im[450:630,870:1050] = gaussian
#imgplot = plt.imshow(mask_im)
original = Image.open(path_to_fixdur_files+'stimuli/natural/image_5.bmp')
#original = original.resize((1920,1080))
imgplot = plt.imshow(mask_im)
# Source: https://opensource.com/life/15/2/resize-images-python
basewidth = 1920
wpercent = (basewidth / float(original.size[0]))
hsize = int((float(original.size[1]) * float(wpercent)))
#original = original.resize((basewidth, hsize), Image.ANTIALIAS)
black_white = original.convert('L')
# imgplot = plt.imshow(original)
original_im = visual.ImageStim(surf, image=black_white, mask=mask_im, units='pix')
#original_im = visual.ImageStim(surf, image=path_to_fixdur_files+'stimuli/natural/image_5.bmp', mask=-gaussian)
#grating = visual.GratingStim(win=surf, mask='circle', size=300, pos=[-4,0], sf=3)
#grating.draw()
#background = Image.new("RGB", (1280, 960), "gray")
#black_white = original_im.convert('L')
original_im.draw()
surf.flip()
| behinger/fixdur | experiment/experiment2/whole_image.py | whole_image.py | py | 2,056 | python | en | code | 0 | github-code | 50 |
14206036439 | import random
import string
from graph import Vertex, Graph
def get_words(text_path):
with open(text_path, 'r') as reader:
text = reader.read()
text = ' '.join(text.split())
text = text.lower()
text = text.translate(str.maketrans('', '', string.punctuation))
words = text.split()
return words
def make_graph(words):
G = Graph()
prev_word = None
for word in words:
curr_vertex = G.get_vertex(word)
if prev_word:
prev_word.increment_edge(curr_vertex)
prev_word = curr_vertex
G.generate_prob_mappings()
return G
def create(G, words, length=10):
quote = []
word = G.get_vertex(random.choice(words))
for _ in range(length):
quote.append(word.value)
word = G.get_next_word(word)
return quote
def main():
words = get_words('files/random_quote.txt')
G = make_graph(words)
quote = create(G, words, 15)
return ' '.join(quote)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print(main())
# pass
| mostlovedpotato/Random_Composer | main.py | main.py | py | 1,097 | python | en | code | 0 | github-code | 50 |
31771523935 | import Maps
import numpy as np
#CONSTANT DECLARATIONS
SPOTNUM = 20
#Dimensions
TILESIZE = 10
MAPWIDTH = 64
MAPHEIGHT = 64
#Constants representing map resources
NONE = 0
LOW = 1
MED = 2
HIGH = 3
SPOT = 4
WALL = 5
#Class for Wifi hotspots
class Spot:
def __init__(self, x=None, y=None):
#Handling for optional parameters
if x and y:
self.x_pos = x
self.y_pos = y
else:
self.x_pos = np.random.randint(1,MAPWIDTH-1)
self.y_pos = np.random.randint(1,MAPHEIGHT-1)
self.spotType = np.random.randint(1,4)
#Draw self onto map
def draw(self, tilemap):
#Iterates through surrounding map area
for i in range(self.x_pos - self.spotType, self.x_pos + self.spotType+1):
for j in range(self.y_pos - self.spotType, self.y_pos + self.spotType+1):
blockedFlag = False
try:
#Ternary operator to determine loop direction
direction = 1 if (i < self.x_pos) else -1
#Draw imaginary line to origin and check for walls
for xBlocking in range(i,self.x_pos, direction):
if tilemap[xBlocking, j] == WALL:
blockedFlag = True
direction = 1 if (j < self.y_pos) else -1
for yBlocking in range(j,self.y_pos, direction):
if tilemap[i, yBlocking] == WALL:
blockedFlag = True
#Collision priorities
if tilemap[i,j] < self.spotType:
if blockedFlag == False:
tilemap[i,j] = self.spotType
except IndexError:
next
#Marks location of the spot itself
tilemap[self.x_pos,self.y_pos] = SPOT
#Returns map with spot & signal drawn
return tilemap
#Fitness function for genetic algorithm
#NOTE: Must be called AFTER drawn onto map
def getFitness(self, tilemap):
signalCounter = 0
#Iterates through surrounding map area
for i in range(self.x_pos - self.spotType, self.x_pos + self.spotType+1):
for j in range(self.y_pos - self.spotType, self.y_pos + self.spotType+1):
try:
if tilemap[i,j] == self.spotType:
signalCounter+=1
except IndexError:
next
signalSizes= {
LOW : 3*3,
MED : 5*5,
HIGH : 7*7}
fitness = signalCounter / signalSizes.get(self.spotType) * 100
return fitness
#Class comprising the population
class iMap:
def __init__(self,tilemap,spotList=None):
if spotList == None:
self.spotList = self.generateSpots(SPOTNUM)
else:
self.spotList = spotList
self.tilemap = np.copy(tilemap)
self.fitness = 0
#Returns list of newly generated spots
def generateSpots(self,n):
newSpotList = list()
for i in range(n):
newSpot = Spot()
newSpotList.append(newSpot)
return newSpotList
#Draws list of spots onto map
def drawSpots(self):
#Preserves original map
for item in self.spotList:
self.tilemap = item.draw(self.tilemap)
#Evaluates fitness of the iMap
#Based on allele fitness and overall map coverage
def mapFitness(self):
#Amount of open space in original map
tilemapArea = (self.tilemap == NONE).sum()
#Draws Wifi spots onto map
self.drawSpots()
#Amount of open space after spots drawn
spotMapArea = (self.tilemap == NONE).sum()
#Gets overall coverage for fitness calculation
#Equates to percentage of free space now covered by signal
#signalFitness = 100 - (spotMapArea / tilemapArea * 100)
signalFitness = tilemapArea - spotMapArea
#Gets average fitness of each allele
alleleFitness = 0
for spot in self.spotList:
alleleFitness = (alleleFitness + spot.getFitness(self.tilemap))
self.fitness = signalFitness + alleleFitness*1.5
#Generates N random population of spotMaps
#Also gets fitness of each map
def generatePopulation(N,tilemap):
population = list()
for i in range(N):
newiMap = iMap(tilemap)
newiMap.mapFitness()
population.append(newiMap)
return population
def pickParents(population,N):
population.sort(key=lambda x: x.fitness, reverse=True)
#Only selects best parents
parents = list()
for i in range(N):
parents.append(population[i])
return parents
#Applies random genetic variation to an individual
def mutate(child):
#Chance of each mutation
moveRate = 0.1
typeRate = 0.01
#Random choices
mutation = np.random.rand()
randIndex = np.random.randint(0,len(child.spotList))
#Moves spot randomly
if mutation < moveRate:
#Boundary detection
child.spotList[randIndex].x_pos = np.random.randint(2,MAPWIDTH-2)
child.spotList[randIndex].y_pos = np.random.randint(2,MAPHEIGHT-2)
#Changes type of one spot randomly
if mutation < typeRate:
child.spotList[randIndex].spotType = np.random.randint(1,4)
return child
#Exchanges genetic information between parents
def crossover(population):
childSpotList=list()
#Selects parents and sorts by fitness
parentNum = 10
parents = pickParents(population,parentNum)
for p in parents:
p.spotList.sort(key=lambda x: x.getFitness(p.tilemap), reverse=True)
index = 0
#Exchanges genetic info
while len(childSpotList) < SPOTNUM:
#Random parent choice
parentChoice = np.random.randint(0,parentNum)
for i,p in enumerate(parents):
if parentChoice == i:
bestFit = p.spotList[index]
#Exclusivity
exclusiveFlag = True
for children in childSpotList:
if bestFit.x_pos == children.x_pos and bestFit.y_pos == children.y_pos:
exclusiveFlag = False
if exclusiveFlag:
childSpotList.append(bestFit)
else:
bestFit.x_pos = np.random.randint(2,MAPWIDTH-2)
bestFit.y_pos = np.random.randint(2,MAPHEIGHT-2)
index = (index+1) % SPOTNUM
return childSpotList
#Produces new population of evolved iMaps
def reproduce(population,tilemap):
newPopulation = list()
for i in range(SPOTNUM):
#Swaps genetic information from each parent AKA Crossover
childSpotList = crossover(population)
#Spawn a new child
child = iMap(tilemap,childSpotList)
#Apply random genetic variation
mutate(child)
#Evaluate fitness of mutant child
child.mapFitness()
#Add mutant child to new population
newPopulation.append(child)
return newPopulation
#Initialisations
DISPLAYSURF = Maps.uiInit()
#Generates new random map for algorithm to run inside
tilemap = Maps.createMap()
population = generatePopulation(50,tilemap)
highscore = 0
generation = 0
#Genetic Algorithm runs, displays every 10 generations
while True:
for i in range(10):
population = reproduce(population,tilemap)
#Gets best child for display
    bestFitness = 0
    bestIndex = 0
for x,imap in enumerate(population):
if imap.fitness > bestFitness:
bestIndex = x
bestFitness = imap.fitness
if bestFitness > highscore:
highscore = bestFitness
bestIndividual = population[bestIndex]
print("New highscore! Fitness = ", highscore, ", Generation = ",generation)
data = [ bestIndividual, highscore, bestFitness, generation]
Maps.uiRefresh(population[bestIndex].tilemap, data, DISPLAYSURF)
generation+=1
| abtheo/Genetic-Wifi-Optimisation | Genetics.py | Genetics.py | py | 8,253 | python | en | code | 0 | github-code | 50 |
71159428636 | import cv2
import numpy as np
from threading import Thread
import get_score
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
    def join(self, *args):
        Thread.join(self, *args)
        return self._return
def get_blurrness_score(image):
image = cv2.cvtColor((image), cv2.COLOR_BGR2GRAY)
fm = cv2.Laplacian(image, cv2.CV_64F).var()
return fm
def divideImage(img):
divisions=[]
row=img.shape[0]
col=img.shape[1]
color = (255, 0, 0)
thickness = 6
r_inc=int(row/4)
curr_r=0
while(curr_r<=(row-r_inc)):
curr_c=0
c_inc=int(col/4)
while(curr_c<=(col-c_inc)):
divisions.append([curr_r,(curr_r+r_inc),curr_c,(curr_c+c_inc)])
if(curr_c+c_inc>col):
c_inc=col-curr_c
curr_c+=c_inc
if(curr_r+r_inc>row):
r_inc=row-curr_r
curr_r+=r_inc
return divisions
def getBlurnessMatrix(img,divisions):
blurrness_mat=[]
for division in divisions:
segment=img[division[0]:division[1],division[2]:division[3]]
blurrness=get_blurrness_score(segment)
blurrness_mat.append([blurrness,division[0],division[1],division[2],division[3]])
blurrness_mat=np.array(blurrness_mat)
blurrness_mat=blurrness_mat[blurrness_mat[:,0].argsort()]
shape=blurrness_mat.shape
s=shape[0]
    # Keep only the three blurriest and the three sharpest segments
    i = 0
while(i<s):
if(i>=3 and i<s-3):
blurrness_mat=np.delete(blurrness_mat,i,0)
s-=1
i-=1
i+=1
return blurrness_mat
def getQuality(blurrness_mat,img):
quality_score=[]
threads=[]
for i in blurrness_mat:
img_segment=img[int(i[1]):int(i[2]),int(i[3]):int(i[4])]
p=ThreadWithReturnValue(target=get_score.getScore,kwargs={'img':img_segment})
threads.append(p)
for thread in threads:
thread.start()
for thread in threads:
quality_score.append(thread.join())
quality_score=np.array(quality_score)
return np.mean(quality_score)
def reduce_size(image):
print(image)
img = cv2.imread("uploads/"+image)
filename=image.split(".")
print(filename)
divisions=divideImage(img)
blurrness_mat=getBlurnessMatrix(img,divisions)
mean_quality=getQuality(blurrness_mat,img)
print("Quality: ",mean_quality)
cv2.imwrite(f"uploads/quality_{filename[0]}_compressed.jpg",img,[cv2.IMWRITE_JPEG_QUALITY,int(mean_quality)])
return f"quality_{filename[0]}_compressed.jpg"
| VaibhavPatil4240/Image-Size-Reducer-Flask | image_score.py | image_score.py | py | 2,803 | python | en | code | 0 | github-code | 50 |
8936662766 | import json
import os
import re
class Config():
"""Configuration management commands and information.
"""
def __init__(self):
# Checking for data directory
data_path = os.environ.get("TESTER_DATA_DIR_PATH")
        # Expand the data directory path to an absolute path
if data_path is None:
error_msg = "Environment variable TESTER_DATA_DIR_PATH not set."
raise InvalidDataDirectoryPathError(error_msg)
data_path = os.path.expanduser(data_path)
data_path = os.path.abspath(data_path)
self.data_path = data_path
# Check that the data directory exists and it is actually a directory
if not os.path.exists(self.data_path):
print(": Data path does not exist, creating it...")
os.mkdir(self.data_path)
if not os.path.isdir(self.data_path):
error_msg = "Data path does not point to a diretory. " +\
"Either delete what's there or change the path."
raise InvalidDataDirectoryPathError(error_msg)
# Set file and directory name per convention
self.context_file_name = "context.json"
self.question_dir_name = "_questions/"
self.tests_dir_name = "_tests/"
self.modules_file_name = "modules.json"
self.students_file_name = "students.json"
self.test_file_ext = "pdf"
self.test_header_file_name = "test_header.md"
self.solution_file_ext = "pdf"
self.solution_file_name = f"_solution.{self.solution_file_ext}"
self.solution_header_file_name = "solution_header.md"
self.custom_css_file_name = "custom.css"
self.pdf_options = {
"page-size": "Letter",
"margin-top": "0.5in",
"margin-right": "0.5in",
"margin-bottom": "0.5in",
"margin-left": "0.5in",
"encoding": "UTF-8",
"user-style-sheet": "test.css",
"log-level": "none"
}
self.email_body_file_name = "email_body.md"
self.email_server = None
self.question_dir_pattern = re.compile(r"[0-9]+")
self.question_file_pattern = re.compile(r"^[0-9]+\.md$")
self.json_indent = 4
        # Load context info from config file
self.context_path = os.path.join(self.data_path, self.context_file_name)
self.context = {
"active_course": None
}
if not os.path.exists(self.context_path):
print(": No context file found, creating it...")
with open(self.context_path, "w+") as f:
json.dump(self.context, f, indent=self.json_indent)
with open(self.context_path, "r") as f:
loaded_context = json.load(f)
self.context.update(loaded_context)
if self.context["active_course"]:
self.active_course_path = os.path.join(self.data_path, self.context["active_course"])
self.active_course_path = os.path.abspath(self.active_course_path)
self.students_file_path = os.path.join(self.active_course_path, self.students_file_name)
self.students_file_path = os.path.abspath(self.students_file_path)
self.questions_dir_path = os.path.join(self.active_course_path, self.question_dir_name)
self.questions_dir_path = os.path.abspath(self.questions_dir_path)
self.modules_file_path = os.path.join(self.active_course_path, self.modules_file_name)
self.modules_file_path = os.path.abspath(self.modules_file_path)
self.test_header_path = os.path.join(self.active_course_path,
self.test_header_file_name)
self.test_header_path = os.path.abspath(self.test_header_path)
self.solution_header_path = os.path.join(self.active_course_path,
self.solution_header_file_name)
self.solution_header_path = os.path.abspath(self.solution_header_path)
self.custom_css_file_path = os.path.join(self.active_course_path,
self.custom_css_file_name)
self.email_body_file_path = os.path.join(self.active_course_path,
self.email_body_file_name)
else:
raise NoActiveCourseError("! Please activate a course first!")
def _save_context(self):
"""Saves out the current context to a JSON file.
"""
with open(self.context_path, "w") as f:
json.dump(self.context, f, indent=self.json_indent, sort_keys=True)
print(": Context updated.")
def _set_active_course(self, course_name):
"""Sets the currently active course and saves the context.
"""
self.context["active_course"] = course_name
self._save_context()
def _ensure_consecutiveness(self, list_type, number_list):
"""Asserts that a list of numbers are consecutive.
"""
if number_list != list(range(min(number_list), max(number_list)+1)):
raise QuestionsAreNotConsecutiveError("{} not consecutive.".format(list_type))
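    # Example (illustrative): [1, 2, 4] fails because range(1, 5) expects
    # [1, 2, 3, 4]; [2, 3, 4] passes.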
def get_students(self) -> dict:
"""Obtains the students dictionary from the active course.
"""
with open(self.students_file_path) as fin:
students = json.load(fin)
return students
def save_students(self, students: dict):
"""Saves the students dictionary to the active course.
"""
with open(self.students_file_path, "w") as fout:
json.dump(students, fout, indent=self.json_indent, sort_keys=True)
def get_modules(self) -> dict:
with open(self.modules_file_path, "r") as fin:
modules = json.load(fin)
return modules
def get_questions(self) -> dict:
"""Obtains questions from questions folder and ensures everything is proper.
"""
questions_nums = [
int(q) for q in os.listdir(self.questions_dir_path)
if self.question_dir_pattern.match(q)
]
if not questions_nums:
return []
questions_nums.sort()
self._ensure_consecutiveness("Question folders", questions_nums)
questions = {}
for q_num in questions_nums:
q_path = os.path.join(self.questions_dir_path, str(q_num))
q_options = [q for q in os.listdir(q_path) if self.question_file_pattern.match(q)]
assert q_options, "No options for question #{}".format(q_num)
q_options.sort()
questions[q_num] = {
"num": q_num,
"path": q_path,
"options": [
os.path.join(q_path, x) for x in os.listdir(q_path)
if self.question_file_pattern.match(x)
]
}
return questions
class InvalidDataDirectoryPathError(Exception):
pass
class NoActiveCourseError(Exception):
pass
class QuestionsAreNotConsecutiveError(Exception):
pass
| redkyn/tester | tester/config.py | config.py | py | 7,094 | python | en | code | 0 | github-code | 50 |
17538209497 | import numpy as np
import matplotlib.pyplot as mp
# Pie chart
# mp.pie(
#     values,           # list of values
#     spaces,           # list of gaps between wedges
#     labels=[],        # list of labels
#     colors=[],        # list of colors
#     '%d%%',           # format string for the percentage labels
#     shadow=True,      # whether to draw a shadow
#     startangle=90,    # start angle; wedges are drawn counter-clockwise
#     radius=1          # radius
# )
mp.figure('Pie', facecolor='lightgray')
# Prepare the data
values = [26, 17, 21, 29, 11]
spaces = [0.05, 0.01, 0.01, 0.01, 0.01]
labels = ['Python', 'JavaScript', 'C++', 'Java', 'PHP']
colors = ['dodgerblue', 'orangered', 'limegreen', 'violet', 'gold']
mp.title('Pie', fontsize = 20)
# equal aspect ratio for the x and y axes
mp.axis('equal')
mp.pie(values, spaces, labels, colors, '%.1f%%', shadow=True)
mp.tight_layout()
mp.show()
| yruns/Machine_Learning | DataAnalysis/Matplotlib/Pie.py | Pie.py | py | 867 | python | zh | code | 0 | github-code | 50 |
8861751687 |
from model.database import DatabaseEngine
from controller.member_controller import MemberController
from controller.event_controller import EventController
from controller.liste_controller import ListController
from exceptions import Error
from vue.member_vue import MemberVue
def main():
print("Bienvenue sur Malix")
    # Init db -- a single engine is enough, since every controller targets
    # the same SQLite database
    database_engine = DatabaseEngine(url='sqlite:///shop.db')
    database_engine.create_database()
    resa_controller = ListController(database_engine)
    member_controller = MemberController(database_engine)
    event_controller = EventController(database_engine)
    MemberVue(member_controller, event_controller, resa_controller).member_shell()
if __name__ == "__main__":
main()
| Thhems/GLPOO-MALIX | MALIX/main_member.py | main_member.py | py | 935 | python | en | code | 0 | github-code | 50 |
4603196508 | from api.utils.subTaskResponse import get_subtask_reponse
from db.models.board import Task
from DTOs.reponseDtos.task import ResponseTask
def get_task_reponse(task: Task) -> ResponseTask:
  # only subtasks that are still open are returned to the client
  subtasks = [get_subtask_reponse(subtask) for subtask in task.subtasks if not subtask.isCompleted] if task.subtasks else []
res_task = ResponseTask(
id = task.id,
title = task.title,
description= task.description,
status= task.bucket.name,
pos = task.position,
subtasks = sorted(subtasks, key=lambda subtask: subtask.pos),
bucketId= task.bucket_id,
existing = True
)
return res_task | JacobSima/trello-backend-fastapi | api/utils/taskResponse.py | taskResponse.py | py | 633 | python | en | code | 0 | github-code | 50 |
70185082714 | c = int(input())
for i in range(c):
    # Red, Green, Blue
    r = {'points': 0, 'hourly': 'g', 'antihourly': 'b'}
    g = {'points': 0, 'hourly': 'b', 'antihourly': 'r'}
    b = {'points': 0, 'hourly': 'r', 'antihourly': 'g'}
    teams = {'r': r, 'g': g, 'b': b}
    p = int(input())
    for h in range(p):
        m, s = input().split()
        m = m.lower()
        s = s.lower()
        # 2 points when the prey is the team's hourly target, 1 otherwise
        teams[m]['points'] += 2 if teams[m]['hourly'] == s else 1
winner = ''
if r['points'] > g['points'] and r['points'] > b['points']:
winner = 'red'
elif g['points'] > r['points'] and g['points'] > b['points']:
winner = 'green'
elif b['points'] > r['points'] and b['points'] > g['points']:
winner = 'blue'
if winner:
print(winner)
elif r['points'] == g['points'] and r['points'] == b['points'] and g['points'] == b['points']:
print('trempate')
else:
print('empate')
| BrauUu/beecrowd-solutions | python/1875.py | 1875.py | py | 929 | python | en | code | 1 | github-code | 50 |
3193781768 | import requests
import logging
from app import app
from flask import render_template
class Mailer:
def __init__(self, app):
self.url = app.config.get("MAILGUN_URL")
self.auth = ("api", app.config.get("MAILGUN_API_KEY"))
self.sender = app.config.get("MAILGUN_USER")
def send_token(self, user, subject, template):
token = user.get_token()
self.__send_mail(
sender=self.sender,
recipient=user.email,
subject=subject,
messages=render_template(template, user=user, token=token),
)
def __send_mail(self, sender, recipient, subject, messages):
data = {
"from": sender,
"to": recipient,
"subject": subject,
"text": messages
}
try:
            response = requests.post(self.url, auth=self.auth, data=data)
return response.raise_for_status()
except Exception as e:
logging.error(f"Error message: {e}")
| nsiregar/pegelinux | app/helper/mail_helper.py | mail_helper.py | py | 997 | python | en | code | 10 | github-code | 50 |
22056831034 | # Samuel Lockton ~ lockton.sam@gmail.com ~ 2022
from dateutil.relativedelta import relativedelta
import datetime
class timeframe(object):
def __init__(self, startTime, intervalLength, priceOpen, priceHigh, priceLow, priceClose):
self.startTime = startTime
self.intervalLength = intervalLength
self.calibrateEndTime(startTime, intervalLength)
        self.priceOpen = priceOpen
        self.priceHigh = priceHigh
        self.priceLow = priceLow
        # the actual Support/Resistance Level will always be at timeframe close
        self.priceClose = priceClose
def calibrateEndTime(self, startTime, intervalLength):
        # Add one interval of time, then strip out the smaller units of time
        if intervalLength == relativedelta(years=+1):
self.endTime = datetime.datetime.strptime((startTime+intervalLength).strftime("%Y"), "%Y")
if intervalLength == relativedelta(months=+1):
self.endTime = datetime.datetime.strptime((startTime+intervalLength).strftime("%Y %m"), "%Y %m")
        if intervalLength == relativedelta(days=+7): # weeks run independently: sunday = 0, saturday = 6
self.endTime = datetime.datetime.strptime((startTime+relativedelta(days=+((7-int(startTime.strftime("%w")))%7)+1)).strftime("%Y %m %d"), "%Y %m %d")
if intervalLength == relativedelta(days=+1):
self.endTime = datetime.datetime.strptime((startTime+intervalLength).strftime("%Y %m %d"), "%Y %m %d")
if intervalLength == relativedelta(hours=+1):
self.endTime = datetime.datetime.strptime((startTime+intervalLength).strftime("%Y %m %d %H"), "%Y %m %d %H") | Theshlock/trader-v2 | sandbox/dev/timeframe.py | timeframe.py | py | 1,837 | python | en | code | 1 | github-code | 50 |
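# Worked example (illustrative): with intervalLength = one week and a start
# of Wednesday 2022-03-02 (strftime("%w") == "3"), (7 - 3) % 7 + 1 = 5 days
# are added, so endTime snaps to midnight on Monday 2022-03-07.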
12165119996 | import os
images=[]
groundtruth=[]
path1='pictures' #requires a folder named 'pictures'
#path11='pictures/الخليج'
path2='GroundTruth' #requires a folder named 'GroundTruth'
def Num_of_dots(word):
Dictionary_dots = {"ب": 1, "ت": 2, "ث": 3,
"ج": 1, "خ": 1, "ذ": 1, "ز": 1,
"ش": 3, "ض": 1,
"ظ": 1, "غ": 1,
"ف": 1, "ق": 2,
"ن": 1, "ي": 2
}
count=0
for x in word:
if x in Dictionary_dots:
count+=Dictionary_dots[x]
print("Number of dots:",count)
return count
def Num_of_possition(word):
Dictionary_letters = {"أ": 1, "ا": 1, "آ": 1,"ء": 1, "إ": 1
,"ب": 2, "ت": 3, "ث": 4,
"ج": 5, "ح": 6, "خ": 7,
"د": 8, "ذ": 9, "ر": 10, "ز": 11,
"س": 12, "ش": 13, "ص": 14, "ض": 15,
"ط": 16, "ظ": 17, "ع": 18, "غ": 19,
"ف": 20, "ق": 21, "ك": 22, "ل": 23,
"م": 24, "ن": 25, "ه": 26, "و": 27, "ي": 28
}
string_possition=""
for x in word:
if x in Dictionary_letters:
string_possition+= str(Dictionary_letters[x]) + "-"
string_possition= string_possition[:-1]
print(string_possition)
return string_possition
myList = os.listdir(path1)
myList2 = os.listdir(path2)
noOfClasses = len(myList)
print("total number of classes detected",len(myList))
for x in myList:
i =0
y = path1+"/"+str(x)
print("y",y)
print("x", x)
os.mkdir(path2+"/"+str(x))
myPicList = os.listdir(path1 + "/" + str(x))
print(x)
myPicList = os.listdir(path1+"/"+str(x))
for z in myPicList:
z2= os.path.splitext(os.path.basename(z))[0]
numberOfDot = Num_of_dots(x)
numberOfPosition = Num_of_possition(x)
file = open(path2 + "/" + str(x) + "/" + z2 + ".txt", "w" ,encoding='utf-8')
file.write('Word: '+x+'\n'
+'Image Name: '+z+'\n'
+'Number of letters: '+str(len(x))+"\n"
+"Number of dots: "+str(numberOfDot)+'\n'
+'Position of letters: '+numberOfPosition)
file.close()
print("ww",myPicList[i])
print("this ",os.listdir(path1+"/"+str(x)))
print("-----------------")
| mohammadyahyaq/Eyfad-Project | ground truth generator/createGroundTruthFile (old version).py | createGroundTruthFile (old version).py | py | 2,538 | python | en | code | 1 | github-code | 50 |
71468012316 |
n, a, b = [int(v) for v in input().split()]
x_list = [int(v) for v in input().split()]
# dp[i][j] := whether some subset of the first i cards (indices 0..i-1) has a
# sum whose remainder mod A equals j
dp = [[False] * (a + 1) for _ in range(n + 1)]
dp[0][0] = True
exist_sum_mod_a_is_b = False
# "push" (forward-propagating) DP
for i in range(n):
x = x_list[i]
for j in range(a):
if dp[i][j] == False:
continue
        # Case: the card is not chosen
dp[i + 1][j] = True
        # Case: the card is chosen
sum_mod_a = (j + x) % a
dp[i + 1][sum_mod_a] = True
if dp[n][b]:
exist_sum_mod_a_is_b = True
ans = "Yes" if exist_sum_mod_a_is_b else "No"
print(ans)
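# Worked example (illustrative): n=3, a=5, b=3, x=[1, 7, 4]. Choosing the
# cards 1 and 7 gives a sum of 8, and 8 % 5 == 3 == b, so the answer is Yes.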
| ksato-dev/algo_method | 7_dp5/q3_6.py | q3_6.py | py | 693 | python | en | code | 0 | github-code | 50 |
22564313099 | import torch
import numpy as np
class Network(torch.nn.Module):
def __init__(self, lr_network=0.01, lr_likelihood=0.01):
super(Network, self).__init__()
self.fc1 = torch.nn.Linear(28 * 28, 256)
self.fc2 = torch.nn.Linear(256, 128)
self.fc3 = torch.nn.Linear(128, 128)
self.fc4 = torch.nn.Linear(128, 10)
self.network_parameters = self.parameters()
self.network_parameters_as_vector = torch.cat([param.view(-1) for param in self.parameters()])
self.likelihood = torch.nn.Linear(self.network_parameters_as_vector.size()[0], 1, bias=False)
self.likelihood_parameters = (param for name, param in self.named_parameters() if name.startswith("likelihood"))
self.network_optimizer = torch.optim.SGD(
self.network_parameters, lr=lr_network, momentum=0.5, nesterov=True)
self.likelihood_optimizer = torch.optim.SGD(
self.likelihood_parameters, lr=lr_likelihood, momentum=0.5, nesterov=True)
def forward(self, x):
x = x.view(-1, 28*28)
x = torch.nn.functional.relu(self.fc1(x))
x = torch.nn.functional.relu(self.fc2(x))
x = torch.nn.functional.relu(self.fc3(x))
        x = torch.nn.functional.log_softmax(self.fc4(x), dim=1)
return x
def optimize(self, loss):
self.network_optimizer.zero_grad()
self.likelihood_optimizer.zero_grad()
loss.backward()
self.likelihood_optimizer.step()
self.network_optimizer.step()
def similarity_loss(self, alpha=0.9):
w = self.network_parameters_as_vector.detach().cuda()
theta = self.likelihood.weight.view(-1)
w_len, theta_len = (w.dot(w), theta.dot(theta))
loss_1 = 1.0 - self.likelihood(w.view(1, -1)) / (torch.sqrt(w_len)*torch.sqrt(theta_len))
        loss_2 = 1.0 - torch.min(w_len, theta_len) / torch.max(w_len, theta_len)
return torch.squeeze(loss_1*alpha + loss_2*(1-alpha))
| TolgaOk/Catastrophic-Forgetting | Ideas/use_all/mutable_elasticity/network.py | network.py | py | 2,099 | python | en | code | 1 | github-code | 50 |
7964152452 | import sqlite3
from sqlite3 import Error
class SQLite:
def __init__(self, db_file):
self.db_file = db_file
def connect(self):
try:
conn = sqlite3.connect(self.db_file)
return conn
except Error as e:
print(e)
def execute(self, sql, params=None):
conn = self.connect()
cur = conn.cursor()
if params is None:
cur.execute(sql)
else:
cur.execute(sql, params)
conn.commit()
conn.close()
return cur.lastrowid
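

if __name__ == "__main__":
    # Usage sketch (illustrative; 'demo.db' is a placeholder path). Each
    # execute() opens a fresh connection, so an in-memory ':memory:' database
    # would not persist between calls -- use a file path instead.
    db = SQLite("demo.db")
    db.execute("CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)")
    rowid = db.execute("INSERT INTO notes (body) VALUES (?)", ("hello",))
    print("inserted row", rowid)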
| harukary/gpt_recipe_app | server/api/core/database.py | database.py | py | 557 | python | en | code | 0 | github-code | 50 |
7470187751 | import json
import requests
def send_to_me_message(access_token: str, template: dict):
"""
    Send a KakaoTalk message to myself
"""
header = {"Authorization": 'Bearer ' + access_token}
url = "https://kapi.kakao.com/v2/api/talk/memo/default/send"
data = {"template_object": json.dumps(template)}
return requests.post(url, headers=header, data=data)
def send_to_you_message(access_token: str, template: dict):
"""
    Send a KakaoTalk message to another user
"""
from conf.settings.prod import settings
uuid = settings.KAKAO_MY_UUID
header = {"Authorization": 'Bearer ' + access_token}
url = "https://kapi.kakao.com/v1/api/talk/friends/message/default/send"
data = {
"receiver_uuids": f'["{uuid}"]',
"template_object": json.dumps(template)
}
return requests.post(url, headers=header, data=data)
class MessageTemplate:
@classmethod
def default_text(cls, message):
return {
'object_type': 'text',
'text': message,
'link': {
'web_url': '',
'mobile_web_url': ''
}
}
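

# Usage sketch (illustrative; ACCESS_TOKEN is a placeholder, not a real value):
#   template = MessageTemplate.default_text("hello")
#   response = send_to_me_message(ACCESS_TOKEN, template)
#   response.raise_for_status()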
| lee-lou2/fastapi | apps/backend/external/kakao/controllers/message.py | message.py | py | 1,135 | python | en | code | 2 | github-code | 50 |
21022434689 | from __future__ import print_function, unicode_literals
import concurrent.futures
import hashlib
import json
import logging
import os
import posixpath
import shutil
import six
PYPI_PREFIX = 'https://pypi.org'
_max_workers = os.environ.get('PYTEST_PYPI_GATEWAY_MAX_THREAD')
MAX_WORKERS = int(_max_workers) if _max_workers else None
logger = logging.getLogger('pytest.pypi-gateway')
class SimplePageParser(six.moves.html_parser.HTMLParser):
"""Parser to process a simple API page.
"""
def __init__(self, *args, **kwargs):
super(SimplePageParser, self).__init__(*args, **kwargs)
self.links = {}
self._current = None
def handle_starttag(self, tag, attrs):
if tag.lower() == 'a':
for key, value in attrs:
if key.lower() == 'href':
url, qs = six.moves.urllib.parse.urldefrag(value)
hsh = six.moves.urllib.parse.parse_qs(qs)['sha256'][0]
self._current = (url, 'sha256', hsh)
def handle_data(self, data):
if self._current is not None:
self.links[data] = self._current
def handle_endtag(self, tag):
if tag.lower() == 'a':
self._current = None
def iter_filename_version(name):
url = posixpath.join(PYPI_PREFIX, 'pypi', name, 'json')
with six.moves.urllib.request.urlopen(url) as conn:
data = conn.read()
for version, infos in json.loads(data.decode('utf-8'))['releases'].items():
for info in infos:
yield info['filename'], version
def get_info(name):
url = posixpath.join(PYPI_PREFIX, 'simple', name)
with six.moves.urllib.request.urlopen(url) as conn:
data = conn.read()
parser = SimplePageParser()
parser.feed(data.decode('utf-8'))
versions = dict(iter_filename_version(name))
return parser.links, versions
def get_file_hash(htype, filename):
h = hashlib.new(htype)
with open(filename, 'rb') as f:
while True:
data = f.read(65535)
if not data:
break
h.update(data)
return h.hexdigest()
def mkdir_p(name):
try:
os.makedirs(name)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
def ensure_file(url, htype, hvalue, spec, filedir):
filename = os.path.join(filedir, spec)
if os.path.exists(filename):
digest = get_file_hash(htype, filename)
if digest == hvalue:
logger.info('Skipping {}'.format(filename))
return
        logger.warning('Replacing {}'.format(filename))
os.unlink(filename)
mkdir_p(os.path.dirname(filename))
six.moves.urllib.request.urlretrieve(url, filename)
logger.info('Downloaded {}'.format(filename))
def ensure_json(name, version, specs, jsondir):
filename = os.path.join(jsondir, name, version, 'data.json')
if os.path.exists(filename):
logger.info('Skipping {}'.format(filename))
return
mkdir_p(os.path.dirname(filename))
url = posixpath.join(PYPI_PREFIX, 'pypi', name, version, 'json')
with six.moves.urllib.request.urlopen(url) as conn:
pypi_data = json.loads(conn.read().decode('utf-8'))
data = {
'info': pypi_data['info'],
'urls': [
# The URL entry needs to be regenerated at runtime.
{k: v for k, v in entry.items() if k != 'url'}
for entry in pypi_data['urls']
if entry['filename'] in specs
],
}
with open(filename, 'w') as f:
json.dump(data, f, ensure_ascii=True)
logger.info('Generated {}'.format(filename))
def iter_ensure_package_files(exe, config, links, versions, filedir, jsondir):
for name, specs in config.items():
for spec in specs:
try:
url, htype, hvalue = links[spec]
except KeyError:
                logger.warning('Failed to find link for {}'.format(spec))
continue
yield exe.submit(ensure_file, url, htype, hvalue, spec, filedir)
try:
version = versions[spec]
except KeyError:
                logger.warning('Failed to find version for {}'.format(spec))
continue
yield exe.submit(ensure_json, name, version, set(specs), jsondir)
def ensure_packages(config, filedir, jsondir):
links = {}
versions = {}
names = list(config)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as exe:
for name_links, name_versions in exe.map(get_info, names):
links.update(name_links)
versions.update(name_versions)
try:
with open(os.path.join(jsondir, 'config.json')) as f:
prev_config = json.load(f)
except Exception:
prev_config = None
if prev_config != config:
logger.info('Configuration changed, rebuilding JSON')
shutil.rmtree(jsondir)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as exe:
futures = list(iter_ensure_package_files(
exe, config, links, versions, filedir, jsondir,
))
for future in concurrent.futures.as_completed(futures):
future.result()
with open(os.path.join(jsondir, 'config.json'), 'w') as f:
json.dump(config, f)
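

if __name__ == "__main__":
    # Illustrative self-check (example.invalid is a placeholder URL):
    # SimplePageParser extracts (url, hash-type, hash) triples from
    # PyPI-style simple-index anchor tags.
    parser = SimplePageParser()
    parser.feed('<a href="https://example.invalid/pkg-1.0.tar.gz'
                '#sha256=abc123">pkg-1.0.tar.gz</a>')
    print(parser.links)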
| uranusjr/pytest-pypi-gateway | src/pytest_pypi_gateway/packages.py | packages.py | py | 5,273 | python | en | code | 0 | github-code | 50 |
16528109002 | import json
import urlparse
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import fetch
data = fetch.fetch()
class Handler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def parse_path(self, path):
if '?' in path:
p, q = path.split('?')[:2]
return p, urlparse.parse_qs(q, keep_blank_values=True)
else:
return path, {}
def process_request(self, request):
path, query = self.parse_path(request)
print("path: {}, query: {}".format(path, query))
if path == '/full_data_raw':
return json.dumps(data)
else:
return 'unknown query'
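    # Example (illustrative): GET /full_data_raw returns the cached
    # fetch.fetch() payload as JSON; any other path yields 'unknown query'.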
def do_GET(self):
self._set_headers()
self.wfile.write(self.process_request(self.path))
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write("please use GET requests")
def run(server_class=HTTPServer, handler_class=Handler, port=80):
server_address = ('', port)
server = server_class(server_address, handler_class)
print('serving')
server.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run(port=8080)
| Akshay666/bagel | backend.py | backend.py | py | 1,252 | python | en | code | 0 | github-code | 50 |
31129942905 | import unittest
from unittest import TestCase
from src.main.python.lib_books import Books
class TestBooks(TestCase):
    def test_display_books(self):
        # display_books is expected to raise FileNotFoundError when the
        # library data file is missing
        with self.assertRaises(FileNotFoundError):
            bk = Books('Hina')
            bk.display_books()
if __name__ == '__main__':
unittest.main()
| Hinakoushar-Tatakoti/Library-Management-System | src/unittest/python/books_tests.py | books_tests.py | py | 371 | python | en | code | 0 | github-code | 50 |
12274245301 | import cv2
import numpy as np
import pickle
# Path to the trained SVM model file
model_filename = 'main/api/svm_model.pkl'
with open(model_filename, 'rb') as file:
loaded_model = pickle.load(file)
# Predict the denomination of a cropped coin image
def predict_coin(image):
    r_channel = list(image[:, :, 2].reshape(-1))  # extract the R channel
    g_channel = list(image[:, :, 1].reshape(-1))  # extract the G channel
    b_channel = list(image[:, :, 0].reshape(-1))  # extract the B channel
rgb = r_channel + g_channel + b_channel
predicted_label = loaded_model.predict([rgb])
return predicted_label
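# Note (illustrative): the model expects one flat feature vector per coin --
# the R, G and B planes of a 45x45 crop concatenated in that order
# (3 * 45 * 45 = 6075 values), matching the resize performed in start().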
# Sobel edge detection
def sobelEdgeDetection(f):
grad_x = cv2.Sobel(f, cv2.CV_32F, 1, 0, ksize=3)
grad_y = cv2.Sobel(f, cv2.CV_32F, 0, 1, ksize=3)
magnitude = abs(grad_x) + abs(grad_y)
g = np.uint8(np.clip(magnitude, 0, 255))
ret, g = cv2.threshold(g, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return g
def coinDetect(_img):
    # =============== Denoising ===============
img_gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
img_gauss = cv2.GaussianBlur(img_gray, (9, 9), 1)
thresh, img_binary_G = cv2.threshold(img_gauss, 127, 255, cv2.THRESH_BINARY_INV)
closing = cv2.morphologyEx(img_binary_G, cv2.MORPH_CLOSE, np.ones((3, 3), dtype=np.uint8))
# cv2.imshow('closing', closing)
    # ============== Edge detection ==============
img_sobel = sobelEdgeDetection(img_gray)
img_sobel = cv2.dilate(img_sobel, np.ones((3, 3), dtype=np.uint8), iterations=1)
cnts, hierarchy = cv2.findContours(img_sobel, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img_sobel, cnts, -1, 255, -1)
# cv2.imshow('img_sobel', img_sobel)
    # ======== Merge the coin masks from the two methods above ========
closing = cv2.add(closing, img_sobel)
cnts, hierarchy = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(closing, cnts, -1, 255, -1)
# cv2.imshow('coin', closing)
    # Determine the sure background region
sure_bg = cv2.dilate(closing, np.ones((9, 9), dtype=np.uint8), iterations=1)
# cv2.imshow('sure_bg', sure_bg)
    # Find the sure foreground region
dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.5 * dist_transform.max(), 255, 0)
# cv2.imshow('sure_fg', sure_fg)
    # Find the unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# cv2.imshow('unknown', unknown)
    # Connected-component labelling
ret, markers = cv2.connectedComponents(sure_fg)
    # Add 1 to all markers so that the sure background is 1 instead of 0
markers = markers + 1
    # Now mark all of the unknown region with 0
markers[unknown == 255] = 0
markers_temp = markers.copy()
markers = np.uint8(markers)
# cv2.imshow('markers', markers * 30)
markers = markers_temp
markers = cv2.watershed(_img, markers)
_result = np.zeros((720, 1080), np.uint8)
for i in range(2, ret + 1):
_result[markers == i] = 255
_result = cv2.erode(_result, np.ones((5, 5), dtype=np.uint8), iterations=1)
# cv2.imshow('result', _result)
return _result
def start(_img):
_img = cv2.resize(_img, (1080, 720))
img_ori = _img.copy()
_result = coinDetect(_img)
cnts, hierarchy = cv2.findContours(_result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    _coinAmount = [0, 0, 0, 0]  # coin counts (index 0: NT$50, 1: NT$10, 2: NT$5, 3: NT$1)
# total = 0
for cnt in cnts:
area = cv2.contourArea(cnt)
if area <= 500: continue
(x, y, w, h) = cv2.boundingRect(cnt)
cv2.circle(_img, (int(x + w / 2), int(y + h / 2)), int((w + h) / 4), (0, 0, 255), 2)
img_pic = img_ori[y - 20:y + h + 20, x - 20:x + w + 20]
# (x, y, w, h) = str(x), str(y), str(w), str(h)
# cv2.imwrite('D:/school/embedded/data/' + x + y + w + h +'.png',img_pic)
img_pic = cv2.resize(img_pic, (45, 45))
predicted_label = predict_coin(img_pic)
if predicted_label == 11 or predicted_label == 12:
cv2.putText(_img, '1NT$', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 0, 0), 3, cv2.LINE_AA)
_coinAmount[3] += 1
# total += 1
elif predicted_label == 51 or predicted_label == 52:
cv2.putText(_img, '5NT$', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 0, 0), 3, cv2.LINE_AA)
_coinAmount[2] += 1
# total += 5
elif predicted_label == 101 or predicted_label == 102 or predicted_label == 103:
cv2.putText(_img, '10NT$', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 0, 0), 3, cv2.LINE_AA)
_coinAmount[1] += 1
# total += 10
elif predicted_label == 501 or predicted_label == 502:
cv2.putText(_img, '50NT$', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 0, 0), 3, cv2.LINE_AA)
_coinAmount[0] += 1
# total += 50
return _img, _coinAmount
if __name__ == '__main__':
filename = "coin.jpg"
img = cv2.imread(filename)
cv2.imshow('img', img)
img, coinAmount = start(img)
print(coinAmount)
cv2.imshow('result', img)
cv2.waitKey()
cv2.destroyAllWindows()
| San-Zero/EmbeddedSystemDesign | main/api/coinDetect.py | coinDetect.py | py | 5,168 | python | en | code | 0 | github-code | 50 |
2864001216 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class PricePrediction:
url = "D:\Projects\ASET\price-monitoring\Data\Bucharest_HousePriceDataset.csv"
column_names = ['NrCamere', 'Suprafata', 'Etaj', 'TotalEtaje', 'Sector', 'Scor', 'Pret']
def __init__(self):
print("init")
def reading_data(self):
f = open(self.url, "r")
column_names = f.readline().replace("\n", "").replace(" ", "").split(",")
print(column_names)
products = []
for line in f:
product = list(map(float, line.replace("\n", "").split(",")))
product[-1] = product[-1] / 1000
products.append(product)
return column_names, products
def analysis(self):
column_names, products = self.reading_data()
for i in range(0, len(column_names)):
print(column_names[i], min(p[i] for p in products), max(p[i] for p in products))
def preprocessing(self):
np.set_printoptions(precision=3, suppress=True)
raw_dataset = pd.read_csv(self.url, names=self.column_names,
na_values='?', comment='\t',
sep=',', skipinitialspace=True)
raw_dataset = raw_dataset.apply(pd.to_numeric, errors='coerce')
dataset = raw_dataset.copy()
print(dataset.tail())
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('Pret')
test_labels = test_features.pop('Pret')
return train_features, test_features, train_labels, test_labels
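    # Note (illustrative): sample(frac=0.8, random_state=0) holds out 20% of
    # the rows for testing, and the fixed seed keeps the split reproducible.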
def plot_surface(self, features, labels, x, y):
plt.scatter(features['Suprafata'], labels, label='Data')
plt.plot(x, y, color='k', label='Predictions')
plt.xlabel('Suprafata')
plt.ylabel('Pret')
plt.legend()
plt.show()
def build_and_compile_model(self, norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error', optimizer=tf.keras.optimizers.Adam(0.001))
return model
def make_model(self):
train_features, test_features, train_labels, test_labels = self.preprocessing()
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
print(normalizer.mean.numpy())
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
print('First example:', first)
print('Normalized:', normalizer(first).numpy())
surface = np.array(train_features['Suprafata'])
surface_normalizer = preprocessing.Normalization(input_shape=[1, ])
surface_normalizer.adapt(surface)
surface_model = tf.keras.Sequential([surface_normalizer, layers.Dense(units=1)])
surface_model.summary()
surface_model.predict(surface[:10])
surface_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1), loss='mean_absolute_error')
history = surface_model.fit(train_features['Suprafata'], train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split=0.2)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
test_results = {'surface_model': surface_model.evaluate(test_features['Suprafata'], test_labels, verbose=0)}
x = tf.linspace(0.0, 250, 251)
y = surface_model.predict(x)
# self.plot_surface(train_features, train_labels, x, y)
linear_model = tf.keras.Sequential([normalizer, layers.Dense(units=1)])
linear_model.predict(train_features[:10])
linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1), loss='mean_absolute_error')
history = linear_model.fit(
train_features, train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split=0.2)
test_results['linear_model'] = linear_model.evaluate(
test_features, test_labels, verbose=0)
surface_model = self.build_and_compile_model(surface_normalizer)
surface_model.summary()
surface_model.save('../machine_learning/machinelearning')
def load_model(self):
surface_model = keras.models.load_model('../machine_learning/machinelearning')
surface_model.summary()
print(type(surface_model))
return surface_model
def make_prediction(self, product):
self.load_model()
        # preprocess the input
        # feed it to the model
        # postprocess the output
return 60000
if __name__ == '__main__':
pricePrediction = PricePrediction()
# pricePrediction.make_model()
pricePrediction.load_model()
| Marcel1123/price-monitoring | Python/machine_learning/price_prediction.py | price_prediction.py | py | 5,519 | python | en | code | 0 | github-code | 50 |
24917963449 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from picar_4wd.pwm import PWM
from picar_4wd.adc import ADC
from picar_4wd.pin import Pin
from picar_4wd.motor import Motor
from picar_4wd.servo import Servo
from picar_4wd.ultrasonic import Ultrasonic
from picar_4wd.speed import Speed
from picar_4wd.filedb import FileDB
from picar_4wd.utils import *
import picar_4wd as fc
import time
import math
def turn_right_deg(w=13, l=13, deg=90):
print('right'+str(deg))
d = (w**2+l**2)**0.5*math.pi*deg/360
print(d)
a = int(d)
speedd = Speed(a)
speedd.start()
fc.turn_right(100)
x = 0
for i in range(11):
time.sleep(0.05)
speed = speedd()
        x += speed * 0.05  # accumulate distance over the 0.05 s sleep interval
print("%smm/s" % speed)
print("%smm" % x)
speedd.deinit()
fc.stop()
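# Worked example (illustrative): with w = l = 13 and deg = 90,
# d = sqrt(13**2 + 13**2) * pi * 90 / 360 ~= 14.4 -- each wheel covers about
# a quarter of the circle whose diameter is the chassis diagonal.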
def turn_left_deg(w=13, l=13, deg=90):
print('left'+str(deg))
d = (w**2+l**2)**0.5*math.pi*deg/360
print(d)
a = int(d)
speedd = Speed(a)
speedd.start()
fc.turn_left(100)
x = 0
for i in range(1):
time.sleep(0.1)
speed = speedd()
x += speed * 0.1
print("%smm/s" % speed)
print("%smm" % x)
speedd.deinit()
fc.stop()
def test3():
speed4 = Speed(25)
speed4.start()
# time.sleep(2)
fc.forward(100)
x = 0
for i in range(1):
time.sleep(0.1)
speed = speed4()
x += speed * 0.1
print("%smm/s"%speed)
print("%smm"%x)
speed4.deinit()
fc.stop() | RASPBERY-PICAR/LAB1_PICAR | examples/helper_functions.py | helper_functions.py | py | 1,476 | python | en | code | 0 | github-code | 50 |
26666511990 | __all__ = ["roll_stalta"]
import os
import numpy as np
import subprocess
from pycheron.util.logger import Logger
from pathlib2 import Path
# Example on how to recomplie seismicRoll lib
# ----------------------
# from numpy import f2py
# with open("/Users/jbobeck/pycheron/rollseis/seismicRoll.f", "r") as myfile:
# source = myfile.read()
# f2py.compile(source, "seismicRoll")
import platform
if platform.system() != "Windows":
try:
import pycheron.seismicRoll as seismicRoll
except ImportError:
fpath = os.path.dirname(__file__)
pwd = os.getcwd()
if pwd != str(Path(fpath).parent):
os.chdir(str(Path(fpath).parent))
print("------------------------------------------------------")
print("Building Fortran Library")
print("------------------------------------------------------")
subprocess.call(
[
"f2py",
"-c",
"-m",
"seismicRoll",
"--quiet",
os.path.dirname(__file__) + "/seismicRoll.f90",
]
)
os.chdir(pwd)
try:
import pycheron.seismicRoll as seismicRoll
except ImportError:
import seismicRoll
def roll_stalta(x, n_sta, n_lta, increment=1, fortran=False, logger=None):
"""
Simple rolling STA/LTA ratio calculation utilized for automatic detection of seismic signal arrival times. Python
version of roll_stalta_numeric_vector function in source_files.cpp seismicRoll R package
"``roll_stalta`` doesn't do any preprocessing of incoming data and merely calculates the ratio of the average value
in the STA window to the average value in the LTA window. Windows are aligned so that the index is at the left edge
of the STA window and at the right edge of the LTA window, e.g., [#]_
.. math::
        STA(x_{i}) = (1/n_{s}) * \sum_{j=i}^{i+n_{s}-1} x_{j}

        LTA(x_{i}) = (1/n_{l}) * \sum_{j=i-n_{l}+1}^{i} x_{j}

        r_{i} = STA_{i}/LTA_{i}
.. code-block:: console
[---------- LTA --------*]........
.......................[*- STA --]
For proper use of this algorithm seismic data should be preprocessed in following manner:
demean, detrend and taper the raw signal
square the processed signal to get power" (Callahan, 2020)
:param x: input data vector
:type x: numpy.array
:param n_sta: integer STA window size
:type n_sta: int
:param n_lta: integer LTA window size
:type n_lta: int
:param increment: "increment shift to use when sliding the window to the next location. For increments greater
than one, the rolling means will not align properly, hence the need for a dedicated
``roll_stalta`` function. Setting increment to a value greater than 1 will result in NaNs for
all skipped over indices." (Callahan, 2020)
:type increment: int
:param fortran: Whether to use Fortran or not. **Note:** Linux/iOS only
:type fortran: bool
:param logger: logger object
:type logger: pycheron.util.logger.Logger
:return: Returns vector of values of the same length as x with each point containing the STA/LTA ratio at that point
:rtype: numpy.array
.. note:: "Values within n_lta - 1 of the beginning and n_sta - 1 of the end are set to NaNs" (Callahan, 2020)
* Code originally ported from seismicRoll R Cran Package
(Callahan, J., R. Casey, M. Templeton, and G. Sharer (2020, July 8). CRAN-Package seismicRoll. The Comprehensive R
Archive Network. Retrieved from https://cran.r-project.org/web/packages/seismicRoll.index) and
augmented and adapted for use within Pycheron. This code is equivalent to IRIS's seismicRoll
roll_stalta_numeric_vector in the source_files.cpp function.
**Example**
.. code-block:: python
import numpy as np
from pycheron.rollseis.roll_stalta import roll_stalta
import obspy
from pycheron.psd.noise.deadChannel import DDT
#Contrived example:
x = [1,5,3,2,1]
x = np.repeat(x,20)
#calculate rolling_stalta with n_sta = 3, n_lta = 6, increment = 1 for above vector x
p = roll_stalta(x,3,6)
**Plotting**
.. code-block:: python
import matplotlib.pyplot as plt
# Plot original data as black triangles
plt.plot(x,marker='^',color = 'black', linestyle = 'None', label = 'data')
#Plot rolling_stalta in red circles
plt.plot(p, marker = 'o', color = 'red', markerfacecolor = 'None', label = 'STA/LTA')
#Adjust plot limits to show upper/lower data points more clearly
plt.xlim([-1,101])
plt.ylim([0,6])
#Add title and legend
plt.title('Test of roll_stalta on contrived example')
plt.legend(loc = 'upper right')
.. image:: _static/roll_stalta.png
.. rubric:: References
.. [#] http://en.wikipedia.org/wiki/First_break_picking
"""
# Set up logger
if logger is None:
logger = Logger(None)
# If using fortran
if fortran:
# Ensure integer window sizes not greater than len x for n_sta and n_lta
if n_sta > len(x):
logger.error("roll_stalta(): n_sta cannot be greater than len x")
return
elif n_lta > len(x):
logger.error("roll_stalta(): n_lta cannot be greater than len x")
return
# Avoid infinite loops
if increment < 1:
logger.error("roll_stalta(): Increment must be >= 1")
return
# Initialize output vector and call the fortran version of the code base to calculate the rolling STA/LTA
out = seismicRoll.roll_stalta(x, n_sta, n_lta, increment)
out[out == 0] = np.nan
# Otherwise still ensure the same thing but don't need to call the fortran version of the code base
else:
# Ensure integer window sizes not greater than len x
if n_sta > len(x):
logger.error("roll_stalta(): n_sta cannot be greater than len x")
return
elif n_lta > len(x):
logger.error("roll_stalta(): n_lta cannot be greater than len x")
return
# Avoid infinite loops
if increment < 1:
logger.error("roll_stalta(): Increment must be >= 1")
return
# Initialize output vector to the length of x and fill with nans
out = np.full(len(x), np.nan)
# Set ind to n_lta
ind = n_lta
# For valid region, calculate the rolling STA/LTA result
while ind < (len(x) - n_sta):
out[ind] = stalta_python(x, n_sta, n_lta, ind)
ind += increment
return out
def stalta_python(x, n_sta, n_lta, ind):
"""
This is a simple ratio of two rolling means that is used to detect seismic signal arrivals. Python
version of roll_stalta function in source_files.cpp seismicRoll R package
IN:
:param x: (vector) - input data
:param n_sta: (int) - integer STA window size (sec)
:param n_lta: (int) - integer LTA window size (sec)
:param ind: (int) - index looping over
OUT:
:return: (vector) - out - "Returns a vector of the same length as the incoming data with NaNs in the LTA window
length at the left end and in the STA window length at the right end" (Callahan, 2020)
* Code originally ported from seismicRoll R Cran Package
(Callahan, J., R. Casey, M. Templeton, and G. Sharer (2020, July 8). CRAN-Package seismicRoll. The Comprehensive R
Archive Network. Retrieved from https://cran.r-project.org/web/packages/seismicRoll.index) and
augmented and adapted for use within Pycheron. This code is equivalent to IRIS's seismicRoll
roll_stalta in the source_files.cpp function.
"""
# Initialize total to 0
total = 0
# Loop through integer STA window size and update the total to index + integer STA window size.
# Calculate the STA, aligned so that the window is right of ind (including ind). STA is total / integer STA window
# size
for i in range(n_sta):
total += x[ind + i]
sta = total / n_sta
# Initialize total again for LTA
total = 0
# Loop through integer STA window size and update the total to index - integer LTA window size.
# Calculate the LTA, aligned so that the window is left of ind (including ind). LTA is total/ integer LTA window
# size
for i in range(n_lta):
total += x[ind - i]
lta = total / n_lta
# Calculate STA/LTA ratio
out = sta / lta
return out
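# Worked example (illustrative): for x = [1, 1, 1, 4, 4, 4] with n_sta = 2,
# n_lta = 3 and ind = 3, STA = (4 + 4) / 2 = 4 and LTA = (4 + 1 + 1) / 3 = 2,
# so the ratio at ind is 2.0 -- the level jump shows up as a ratio > 1.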
| sandialabs/pycheron | pycheron/rollseis/roll_stalta.py | roll_stalta.py | py | 8,671 | python | en | code | 20 | github-code | 50 |
18222962327 | from __future__ import print_function
import ast
import atexit # for atexit.register()
import functools
import glob
import operator
import os
import pickle
import re
# replacement for functions from the commands module, which is deprecated.
import subprocess
import sys
import time
try:
from setuptools import sysconfig
except:
from distutils import sysconfig
import SCons
# scons does not like targets that come and go (if cleaning, if python,
# etc). All targets are needed for proper cleaning. If a target should
# not be built (if not python), then do not include the target in the
# next layer sources.
# scons gets confused by targets that are not a real file (shmclean, etc.).
# Set them Pseudo (like in a Makefile) and use an Alias() for them.
# Note: Do not use context.TryRun() as that breaks cross-compiling
# Facilitate debugging with pdb.
# At pdb startup, the environment is such that setting breakpoints in
# SConstruct requires specifying its full absolute path, which is incovenient.
# Stopping here at an automatic breakpoint makes this easier. Note that this
# code has no effect unless pdb is loaded.
# To use this, run with pdb and continue from the initial pdb prompt.
pdb_module = sys.modules.get('pdb')
if pdb_module:
pdb_module.set_trace()
pass # Breakpoint default file is now SConscript
# gpsd needs Scons version at least 2.3
EnsureSConsVersion(2, 3, 0)
# gpsd needs Python version at least 2.6
EnsurePythonVersion(2, 6)
# By user choice, or due to system-dependent availability, the scons
# executable may be called using names other than plain "scons",
# e.g. "scons-3" on CentOS 8.
scons_executable_name = os.path.basename(sys.argv[0]) or 'scons'
# Have scons rebuild an existing target when the source(s) MD5 changes
# Do not use time to prevent rebuilding when sources, like gpsd_config.h,
# are rebuilt, but with no changes.
Decider('MD5')
# Put .sconsign*dblite and .scons-options-cache in variantdir for
# one-touch cleaning
# support building with various Python versions.
sconsign_file = '.sconsign.%d.dblite' % pickle.HIGHEST_PROTOCOL
SConsignFile(os.getcwd() + os.path.sep + sconsign_file)
# Start by reading configuration variables from the cache
opts = Variables('.scons-option-cache')
# ugly hack from http://www.catb.org/esr/faqs/practical-python-porting/
# handle python2/3 strings
def polystr(o):
if bytes is str: # Python 2
return str(o)
# python 3.
if isinstance(o, str):
return o
if isinstance(o, (bytes, bytearray)):
return str(o, encoding='latin1')
if isinstance(o, int):
return str(o)
raise ValueError
def strtobool(val):
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
else:
raise ValueError("invalid truth value %r" % (val,))
# Helper functions for revision hackery
def GetMtime(file):
"""Get mtime of given file, or 0."""
try:
return os.stat(file).st_mtime
except OSError:
return 0
def FileList(patterns, exclusion=None):
    """Get list of files based on patterns, minus excluded path."""
    files = functools.reduce(operator.add, map(glob.glob, patterns), [])
    if exclusion:
        files = [file for file in files if exclusion not in file]
    return files
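# Example (illustrative): FileList(['*.c', '*.h'], 'tests') collects every
# matching file in the current directory whose path does not contain 'tests'.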
# FIXME: replace with TryAction()
def _getstatusoutput(cmd, nput=None, shell=True, cwd=None, env=None):
pipe = subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, errout) = pipe.communicate(input=nput)
status = pipe.returncode
return (status, output)
# Workaround for old SCons bug that couldn't overwrite existing symlinks
# This was fixed in 2.3.2, but we allow 2.3.0 (e.g., on Ubuntu 14)
#
# In the troublesome versions, we monkey-patch os.symlink to bully it through
standard_os_symlink = os.symlink
def _forced_symlink(source, link_name):
try:
standard_os_symlink(source, link_name)
except OSError:
# Out of paranoia, only do this when the target is a symlink
if os.path.islink(link_name):
os.remove(link_name)
standard_os_symlink(source, link_name)
else:
raise
if SCons.__version__ in ['2.3.0', '2.3.1']:
os.symlink = _forced_symlink
# SCons 2.3.0 is also missing the Pseudo method. See the workaround after
# the initial 'env' setup.
# All man pages. Always build them all.
all_manpages = {
"man/cgps.1": "man/cgps.adoc",
"man/gegps.1": "man/gegps.adoc",
"man/gps.1": "man/gps.adoc",
"man/gps2udp.1": "man/gps2udp.adoc",
"man/gpscsv.1": "man/gpscsv.adoc",
"man/gpscat.1": "man/gpscat.adoc",
"man/gpsctl.1": "man/gpsctl.adoc",
"man/gpsd.8": "man/gpsd.adoc",
"man/gpsdebuginfo.1": "man/gpsdebuginfo.adoc",
"man/gpsdctl.8": "man/gpsdctl.adoc",
"man/gpsdecode.1": "man/gpsdecode.adoc",
"man/gpsd_json.5": "man/gpsd_json.adoc",
"man/gpsfake.1": "man/gpsfake.adoc",
"man/gpsinit.8": "man/gpsinit.adoc",
"man/gpsmon.1": "man/gpsmon.adoc",
"man/gpspipe.1": "man/gpspipe.adoc",
"man/gpsplot.1": "man/gpsplot.adoc",
"man/gpsprof.1": "man/gpsprof.adoc",
"man/gpsrinex.1": "man/gpsrinex.adoc",
"man/gpssnmp.1": "man/gpssnmp.adoc",
"man/gpssubframe.1": "man/gpssubframe.adoc",
"man/gpxlogger.1": "man/gpxlogger.adoc",
"man/lcdgps.1": "man/lcdgps.adoc",
"man/libgps.3": "man/libgps.adoc",
"man/libgpsmm.3": "man/libgpsmm.adoc",
"man/libQgpsmm.3": "man/libgpsmm.adoc",
"man/ntpshmmon.1": "man/ntpshmmon.adoc",
"man/ppscheck.8": "man/ppscheck.adoc",
"man/ubxtool.1": "man/ubxtool.adoc",
"man/xgps.1": "man/xgps.adoc",
"man/xgpsspeed.1": "man/xgpsspeed.adoc",
"man/zerk.1": "man/zerk.adoc",
}
# doc files to install in share/gpsd/doc
doc_files = [
'AUTHORS',
'build.adoc',
'COPYING',
'www/example1.c.txt',
'www/example2.py.txt',
'NEWS',
'README.adoc',
'SUPPORT.adoc',
]
# doc files to install in share/gpsd/doc/
icon_files = [
'packaging/X11/gpsd-logo.png',
]
# MIB files to install in $PREFIX/share/snmp/mibs/gpsd/
mib_files = [
'man/GPSD-MIB',
]
mib_lint = (mib_files + ['SConstruct', 'SConscript'])
# gpsd_version, and variantdir, from SConstruct
Import('*')
# Create a fixed-name symlink to the build tree, for scripts and symlinks.
# FIXME: Make this work with Execute()
vdir_parent = os.path.dirname(os.path.abspath(os.path.dirname(variantdir)))
try:
os.symlink(variantdir, os.path.join(vdir_parent, 'buildtmp'))
except OSError:
pass # May already exist
# API (JSON) version
api_version_major = 3
api_version_minor = 15
# client library version
libgps_version_current = 30
libgps_version_revision = 0
libgps_version_age = 0
libgps_version = "%d.%d.%d" % (libgps_version_current, libgps_version_age,
libgps_version_revision)
#
# Release identification ends here
# Hosting information (mainly used for templating web pages) begins here
# Each variable foo has a corresponding @FOO@ expanded in .in files.
# There are no project-dependent URLs or references to the hosting site
# anywhere else in the distribution; preserve this property!
annmail = "gpsd-announce@nongnu.org"
bugtracker = "https://gitlab.com/gpsd/gpsd/-/issues"
cgiupload = "root@thyrsus.com:/var/www/cgi-bin/"
clonerepo = "git@gitlab.com:gpsd/gpsd.git"
devmail = "gpsd-dev@lists.nongnu.org"
download = "http://download-mirror.savannah.gnu.org/releases/gpsd/"
formserver = "www@thyrsus.com"
gitrepo = "git@gitlab.com:gpsd/gpsd.git"
ircchan = "irc://chat.libera.chat:6697/#gpsd"
mailman = "https://lists.nongnu.org/mailman/listinfo/"
mainpage = "https://gpsd.io"
projectpage = "https://gitlab.com/gpsd/gpsd"
scpupload = "garyemiller@dl.sv.nongnu.org:/releases/gpsd/"
sitename = "GPSD"
sitesearch = "gpsd.io"
tiplink = "<a href='https://www.patreon.com/esr'>" \
"leave a remittance at Patreon</a>"
tipwidget = '<p><a href="https://www.patreon.com/esr">' \
'Donate here to support continuing development.</a></p>'
usermail = "gpsd-users@lists.nongnu.org"
webform = "http://www.thyrsus.com/cgi-bin/gps_report.cgi"
website = "https://gpsd.io/"
# Hosting information ends here
PYTHON_SYSCONFIG_IMPORT = 'from distutils import sysconfig'
# Utility productions
def Utility(target, source, action, **kwargs):
targ = env.Command(target=target, source=source, action=action, **kwargs)
# why always build? wasteful?
# when gpsdecode is the source this rebuilds the entire daemon!
# env.AlwaysBuild(targ)
# Why precious?
env.Precious(targ)
# Is pseudo really needed (didn't used to be)?
env.Pseudo(targ)
# Alias to make name work without variantdir prefix
env.Alias(target, targ)
return targ
def UtilityWithHerald(herald, target, source, action, **kwargs):
if not env.GetOption('silent'):
action = ['@echo "%s"' % herald] + action
return Utility(target=target, source=source, action=action, **kwargs)
# Spawn replacement that suppresses non-error stderr
def filtered_spawn(sh, escape, cmd, args, env):
proc = subprocess.Popen([sh, '-c', ' '.join(args)],
env=env, close_fds=True, stderr=subprocess.PIPE)
_, stderr = proc.communicate()
if proc.returncode:
sys.stderr.write(stderr)
return proc.returncode
#
# Build-control options
#
# guess systemd defaults
systemd_dir = '/lib/systemd/system'
systemd = os.path.exists(systemd_dir)
# Set distribution-specific defaults here
imloads = True
boolopts = (
# GPS protocols
# for back compatibility, deprecated Feb 2021
("ashtech", True, "alias for NMEA0183 support, deprecated"),
("earthmate", True, "DeLorme EarthMate Zodiac support"),
("evermore", True, "EverMore binary support"),
("fury", True, "Jackson Labs Fury and Firefly support"),
("fv18", True, "San Jose Navigation FV-18 support"),
("garmin", True, "Garmin kernel driver support"),
("garmintxt", True, "Garmin Simple Text support"),
("geostar", True, "Geostar Protocol support"),
("greis", True, "Javad GREIS support"),
("itrax", True, "iTrax hardware support"),
("navcom", True, "Navcom NCT support"),
("nmea2000", True, "NMEA2000/CAN support"),
("oncore", True, "Motorola OnCore chipset support"),
("sirf", True, "SiRF chipset support"),
("skytraq", True, "Skytraq chipset support"),
("superstar2", True, "Novatel SuperStarII chipset support"),
("tnt", True, "True North Technologies support"),
("tripmate", True, "DeLorme TripMate support"),
("tsip", True, "Trimble TSIP support"),
("ublox", True, "u-blox Protocol support"),
# Non-GPS protocols
("aivdm", True, "AIVDM support"),
("gpsclock", True, "Furuno GPSClock support"),
("isync", True, "Spectratime iSync LNRClok/GRCLOK support"),
("oceanserver", True, "OceanServer support"),
("rtcm104v2", True, "rtcm104v2 support"),
("rtcm104v3", True, "rtcm104v3 support"),
# Time service
("oscillator", True, "Disciplined oscillator support"),
# Export methods
("dbus_export", True, "enable DBUS export support"),
("shm_export", True, "export via shared memory"),
("socket_export", True, "data export over sockets"),
# Communication
("bluez", True, "BlueZ support for Bluetooth devices"),
('usb', True, "libusb support for USB devices"),
# Other daemon options
("control_socket", True, "control socket for hotplug notifications"),
("systemd", systemd, "systemd socket activation"),
# Client-side options
("clientdebug", True, "client debugging support"),
("libgpsmm", True, "build C++ bindings"),
("ncurses", True, "build with ncurses"),
("qt", True, "build Qt bindings"),
# Daemon options
("squelch", False, "squelch gpsd_log/gpsd_hexdump to save cpu"),
# Build control
("coveraging", False, "build with code coveraging enabled"),
("debug", False, "add debug information to build, unoptimized"),
("debug_opt", False, "add debug information to build, optimized"),
("gpsdclients", True, "gspd client programs"),
("gpsd", True, "gpsd itself"),
("implicit_link", imloads, "implicit linkage is supported in shared libs"),
# FIXME: should check for Pi, not for "linux"
("magic_hat", sys.platform.startswith('linux'),
"special Linux PPS hack for Raspberry Pi et al"),
("minimal", False, "turn off every option not set on the command line"),
("nostrip", False, "don't symbol-strip binaries at link time"),
("profiling", False, "build with profiling enabled"),
("python", True, "build Python support and modules."),
("shared", True, "build shared libraries, not static"),
("timeservice", False, "time-service configuration"),
("xgps", True, "include xgps and xgpsspeed."),
# Test control
("slow", False, "run tests with realistic (slow) delays"),
)
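# Each of these becomes a yes/no variable on the scons command line,
# e.g. (illustrative): scons nmea2000=no qt=no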
# Now add the boolopts, stepping on any values just read from
# '.scons-option-cache'; without this, no cache would mean no boolopts at all.
for (name, default, helpd) in boolopts:
opts.Add(BoolVariable(name, helpd, default))
# See PEP 394 for why 'python' is the preferred name for Python.
# override with "target_python=XX" on scons command line if want different
# Later there are tests for OS specifics.
def_target_python = "python"
def_python_shebang = "/usr/bin/env %s" % def_target_python
# Gentoo, Fedora, openSUSE systems use uucp for ttyS* and ttyUSB*
if os.path.exists("/etc/gentoo-release"):
def_group = "uucp"
else:
def_group = "dialout"
# darwin and BSDs do not have /run, maybe others.
if os.path.exists("/run"):
rundir = "/run"
else:
rundir = "/var/run"
nonboolopts = (
("gpsd_group", def_group, "privilege revocation group"),
("gpsd_user", "nobody", "privilege revocation user",),
("manbuild", "auto",
"build help in man and HTML formats. No/Auto/Yes."),
("max_clients", '64', "maximum allowed clients"),
("max_devices", '6', "maximum allowed devices"),
("prefix", "/usr/local", "installation directory prefix"),
("python_coverage", "coverage run", "coverage command for Python progs"),
("python_libdir", "", "Python module directory prefix"),
("python_shebang", def_python_shebang, "Python shebang"),
("qt_versioned", "", "version for versioned Qt"),
("release", "", "Suffix for gpsd version"),
("rundir", rundir,
"Directory for run-time variable data"),
("sysroot", "",
"Logical root directory for headers and libraries.\n"
"For cross-compiling, or building with multiple local toolchains.\n"
"See gcc and ld man pages for more details."),
("target", "",
"Prefix to the binary tools to use (gcc, ld, etc.)\n"
"For cross-compiling, or building with multiple local toolchains.\n"
),
# If build and target platform are different, then redefining target
# platform might be necessary to use better build flags
("target_platform", sys.platform,
"target platform for cross-compiling (linux, darwin, etc.)"),
("target_python", def_target_python, "target Python version as command"),
)
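# Illustrative cross-build invocation using the options above:
#   scons target=arm-linux-gnueabihf sysroot=/opt/arm-sysroot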
# now step on the non-bool opts just read from '.scons-option-cache'
# why?
for (name, default, helpd) in nonboolopts:
opts.Add(name, helpd, default)
pathopts = (
("bindir", "bin", "application binaries directory"),
("docdir", "share/gpsd/doc", "documents directory"),
("icondir", "share/gpsd/icons", "icon directory"),
("includedir", "include", "header file directory"),
("libdir", "lib", "system libraries"),
("mandir", "share/man", "manual pages directory"),
# /usr/share/snmp/mibs is default for net-snmp
("mibdir", "share/snmp/mibs/gpsd", "MIB directory"),
("pkgconfig", "$libdir/pkgconfig", "pkgconfig file directory"),
("sbindir", "sbin", "system binaries directory"),
("sharedir", "share/gpsd", "share directory"),
("sysconfdir", "etc", "system configuration directory"),
("udevdir", "/lib/udev", "udev rules directory"),
("unitdir", systemd_dir, "Directory for systemd unit files"),
)
# now step on the path options just read from '.scons-option-cache'
for (name, default, helpd) in pathopts:
opts.Add(PathVariable(name, helpd, default, PathVariable.PathAccept))
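# Note: relative paths here are rooted under $prefix at install time by
# installdir() below; absolute ones (udevdir, unitdir) are used as-is,
# because os.path.join() discards the prefix before an absolute component.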
#
# Environment creation
#
import_env = (
# Variables used by programs invoked during the build
"DISPLAY", # Required for dia to run under scons
"GROUPS", # Required by gpg
"HOME", # Required by gpg
"LANG", # To avoid Gtk warnings with Python >=3.7
'PATH', # Required for ccache and Coverity scan-build
'CCACHE_DIR', # Required for ccache
'CCACHE_RECACHE', # Required for ccache (probably there are more)
# pkg-config (required for crossbuilds at least, and probably pkgsrc)
'PKG_CONFIG_LIBDIR',
'PKG_CONFIG_PATH',
'PKG_CONFIG_SYSROOT_DIR',
# Variables for specific packaging/build systems
"MACOSX_DEPLOYMENT_TARGET", # MacOSX 10.4 (and probably earlier)
'STAGING_DIR', # OpenWRT and CeroWrt
'STAGING_PREFIX', # OpenWRT and CeroWrt
'CWRAPPERS_CONFIG_DIR', # pkgsrc
# Variables used in testing
'WRITE_PAD', # So we can test WRITE_PAD values on the fly.
)
envs = {}
for var in import_env:
if var in os.environ:
envs[var] = os.environ[var]
envs["GPSD_HOME"] = os.getcwd() + os.sep + 'gpsd'
env = Environment(tools=["default", "tar", "textfile"], options=opts, ENV=envs)
# Release identification begins here.
#
# Actual releases follow the normal X.Y or X.Y.Z scheme. The version
# number in git between releases has the form X.Y~dev, when it is
# expected that X.Y will be the next actual release. As an example,
# when 3.20 is the last release, and 3.20.1 is the expected next
# release, the version in git will be 3.20.1~dev. Note that ~ is used,
# because there is some precedent: ~ is an allowed character in Debian
# version strings, and it does not cause confusion over whether - separates
# components of the package name, separates the name from the version, or
# separates version components.
if 'dev' in gpsd_version:
(st, gpsd_revision) = _getstatusoutput('git describe --tags')
if st != 0:
# If git describe failed
# Try to use current commit hash
(st, gpsd_commit) = _getstatusoutput('git rev-parse HEAD')
if st == 0 and gpsd_commit:
# Format output similar to normal revision
gpsd_revision = '%s-g%s' % (gpsd_version, polystr(gpsd_commit[:9]))
else:
# Only if git describe and git rev-parse failed
# Use timestamp from latest relevant file,
# ignoring generated files (../$variantdir)
# from root, not from $variantdir
files = FileList(['../*.c', '../*/*.c', '../*.cpp', '../*/*.cpp',
'../include/*.h', '../*.in', '../*/*.in',
'../SConstruct', '../SConscript'],
'../%s' % variantdir)
timestamps = map(GetMtime, files)
if timestamps:
from datetime import datetime
latest = datetime.fromtimestamp(sorted(timestamps)[-1])
gpsd_revision = '%s-%s' % (gpsd_version, latest.isoformat())
else:
gpsd_revision = gpsd_version # Paranoia
else:
gpsd_revision = gpsd_version
gpsd_revision = polystr(gpsd_revision.strip())
# Distros like to add a suffix to the version. Fedora, and others,
# call it the "release". It often looks like: r1
if env['release']:
gpsd_revision += "-" + polystr(env['release'])
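# e.g. with release=r1, a revision of 3.24 becomes 3.24-r1 (illustrative).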
# SCons 2.3.0 lacks the Pseudo method. If it's missing here, make it a
# dummy and hope for the best.
try:
env.Pseudo
except AttributeError:
env.Pseudo = lambda x: None
# Minimal build turns off every option not set on the command line.
if ARGUMENTS.get('minimal'):
for (name, default, helpd) in boolopts:
# Ensure gpsd and gpsdclients are always enabled unless explicitly
# turned off.
if ((default is True and
not ARGUMENTS.get(name) and
name not in ("gpsd", "gpsdclients"))):
env[name] = False
# Time-service build = stripped-down with some diagnostic tools
if ARGUMENTS.get('timeservice'):
timerelated = ("gpsd",
"ipv6",
"magic_hat",
"ncurses",
"oscillator",
"socket_export",
"ublox", # For the Uputronics board
)
for (name, default, helpd) in boolopts:
if ((default is True and
not ARGUMENTS.get(name) and
name not in timerelated)):
env[name] = False
# iSync uses ublox underneath, so we force-enable it
if env['isync']:
env['ublox'] = True
opts.Save('.scons-option-cache', env)
for (name, default, helpd) in pathopts:
env[name] = env.subst(env[name])
env['VERSION'] = gpsd_version
env['SC_PYTHON'] = sys.executable # Path to SCons Python
# Set defaults from environment. Note that scons doesn't cope well
# with multi-word CPPFLAGS/LDFLAGS/SHLINKFLAGS values; you'll have to
# explicitly quote them or (better yet) use the "=" form of GNU option
# settings.
#
# Scons also uses different internal names than most other build-systems.
# So we rely on MergeFlags/ParseFlags to do the right thing for us.
#
# scons uses gcc, or clang, to link. Thus LDFLAGS does not serve its
# traditional function of providing arguments to ld. LDFLAGS set in the
# environment before running scons get moved into CCFLAGS by scons.
# LDFLAGS set while running scons get ignored.
#
# This means all uses of LDFLAGS in this file are simply dead code, except
# for the import from the environment passed to scons.
env['STRIP'] = "strip"
env['PKG_CONFIG'] = "pkg-config"
for i in ["AR", # linker for static libs, usually "ar"
"CC",
"CXX",
# "LD", # scons does not use LD, usually "ld"
"PKG_CONFIG",
"SHLINK", # linker for shared libs, usually "gcc" or "g++", NOT "ld"
"STRIP",
"TAR"]:
if i in os.environ:
env[i] = os.getenv(i)
for i in ["ARFLAGS",
"CCFLAGS",
"CFLAGS",
"CPPFLAGS",
"CXXFLAGS",
"LDFLAGS",
"LINKFLAGS",
"SHLINKFLAGS",
]:
if i in os.environ:
# MergeFlags() puts the options where scons wants them, not
# where you asked them to go.
env.MergeFlags(Split(os.getenv(i)))
# Keep scan-build options in the environment
for key, value in os.environ.items():
if key.startswith('CCC_'):
env.Append(ENV={key: value})
# Placeholder so we can kluge together something like VPATH builds.
# $SRCDIR replaces occurrences for $(srcdir) in the autotools build.
# scons can get confused if this is not a full path
# FIXME: could get variantdir from SRCDIR
env['SRCDIR'] = os.getcwd()
# We may need to force slow regression tests to get around race
# conditions in the pty layer, especially on a loaded machine.
if env["slow"]:
env['REGRESSOPTS'] = "-S"
else:
env['REGRESSOPTS'] = ""
if env.GetOption("silent"):
env['REGRESSOPTS'] += " -Q"
def announce(msg, end=False):
if not env.GetOption("silent"):
print(msg)
if end:
# duplicate message at exit
atexit.register(lambda: print(msg))
announce("scons version: %s" % SCons.__version__)
announce("scons is running under Python version: %s" %
".".join(map(str, sys.version_info)))
announce("gpsd version: %s" % polystr(gpsd_revision))
# DESTDIR environment variable means user prefix the installation root.
DESTDIR = os.environ.get('DESTDIR', '')
def installdir(idir, add_destdir=True):
# use os.path.join to handle absolute paths properly.
wrapped = os.path.join(env['prefix'], env[idir])
if add_destdir:
wrapped = os.path.normpath(DESTDIR + os.path.sep + wrapped)
wrapped.replace("/usr/etc", "/etc")
wrapped.replace("/usr/lib/systemd", "/lib/systemd")
return wrapped
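# With the defaults above (illustrative):
#   installdir('libdir')  -> DESTDIR + '/usr/local/lib'
#   installdir('udevdir') -> DESTDIR + '/lib/udev'  (absolute, prefix ignored)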
# Honor the specified installation prefix in link paths.
if env["sysroot"]:
env.Prepend(LIBPATH=[env["sysroot"] + installdir('libdir',
add_destdir=False)])
# Don't change CCFLAGS if already set by environment.
if 'CCFLAGS' in os.environ:
announce('Warning: CCFLAGS from environment overriding scons settings')
else:
# Should we build with profiling?
if env['profiling']:
env.Append(CCFLAGS=['-pg'])
# Should we build with coveraging?
if env['coveraging']:
env.Append(CFLAGS=['-coverage'])
env.Append(LINKFLAGS=['-coverage'])
# Should we build with debug symbols?
if env['debug'] or env['debug_opt']:
env.Append(CCFLAGS=['-g3'])
env.Append(LINKFLAGS=['-g3'])
# Should we build with optimisation?
if env['debug'] or env['coveraging']:
env.Append(CCFLAGS=['-O0'])
else:
env.Append(CCFLAGS=['-O2'])
# Cross-development
devenv = (("ADDR2LINE", "addr2line"),
("AR", "ar"),
("AS", "as"),
("CC", "gcc"),
("CPP", "cpp"),
("CXX", "c++"),
("CXXFILT", "c++filt"),
("GCCBUG", "gccbug"),
("GCOV", "gcov"),
("GPROF", "gprof"),
("GXX", "g++"),
# ("LD", "ld"), # scons does not use LD
("NM", "nm"),
("OBJCOPY", "objcopy"),
("OBJDUMP", "objdump"),
("RANLIB", "ranlib"),
("READELF", "readelf"),
("SIZE", "size"),
("STRINGS", "strings"),
("STRIP", "strip"),
)
if env['target']:
for (name, toolname) in devenv:
env[name] = env['target'] + '-' + toolname
if env['sysroot']:
env.MergeFlags({"CFLAGS": ["--sysroot=%s" % env['sysroot']]})
env.MergeFlags({"LINKFLAGS": ["--sysroot=%s" % env['sysroot']]})
# Build help
def cmp(a, b):
return (a > b) - (a < b)
# FIXME: include __doc__ in help
Help("""Arguments may be a mixture of switches and targets in any order.
Switches apply to the entire build regardless of where they are in the order.
Important switches include:
prefix=/usr probably what packagers want
Options are cached in a file named .scons-option-cache and persist to later
invocations. The file is editable. Delete it to start fresh. Current option
values can be listed with 'scons -h'.
""" + opts.GenerateHelpText(env, sort=cmp))
# Configuration
def CheckFlt_Eval_Method(context):
"""Ensure FLT_EVAL_METHOD is 0"""
context.Message('Checking FLT_EVAL_METHOD is 0... ')
ret = context.TryLink("""
#include <float.h>
#ifndef FLT_EVAL_METHOD
error
#endif
#if 0 != FLT_EVAL_METHOD
error
#endif
int main(int argc, char **argv) {
(void) argc; (void) argv;
return 0;
}
""", '.c')
context.Result(ret)
return ret
def CheckPKG(context, name):
context.Message('Checking pkg-config for %s... ' % name)
ret = context.TryAction('%s --exists \'%s\''
% (context.env['PKG_CONFIG'], name))[0]
context.Result(ret)
return ret
def CheckStrerror_r(context):
"""Return strerror_r(24,...).
Will return true if POSIX, false if gnu-like
Required because libc's are random about it.
"""
context.Message('Checking if strerror_r() returns int... ')
old_CFLAGS = context.env['CFLAGS'][:] # Get a *copy* of the old list
# Make the cast warning an error
context.env.Append(CFLAGS="-Werror")
ret = context.TryCompile("""
#define _GNU_SOURCE
#include <stddef.h>
#include <string.h>
int main() {
char buf[100];
int ret;
ret = strerror_r(24, buf, sizeof(buf));
return ret;
}
""", '.c')
context.Result(ret)
context.env.Replace(CFLAGS=old_CFLAGS) # restore flags
return ret
def CheckCompilerOption(context, option):
context.Message('Checking if compiler accepts %s... ' % (option,))
old_CFLAGS = context.env['CFLAGS'][:] # Get a *copy* of the old list
context.env.Append(CFLAGS=option)
    new_CFLAGS = context.env['CFLAGS'][:]  # Get a *copy* of the new list
    # we don't want to use options that generate warnings.
context.env.Append(CFLAGS="-Werror")
ret = context.TryLink("""
int main(int argc, char **argv) {
(void) argc; (void) argv;
return 0;
}
""", '.c')
if ret:
# worked, remove the -Werror
context.env.Replace(CFLAGS=new_CFLAGS)
else:
context.env.Replace(CFLAGS=old_CFLAGS)
context.Result(ret)
return ret
# Check if this compiler is C11 or better
def CheckC11(context):
context.Message('Checking if compiler is C11... ')
ret = context.TryLink("""
#if (__STDC_VERSION__ < 201112L)
#error Not C11
#endif
int main(int argc, char **argv) {
(void) argc; (void) argv;
return 0;
}
""", '.c')
context.Result(ret)
return ret
def GetPythonValue(context, name, imp, expr, brief=False):
"""Get a value from the target python, not the running one."""
context.Message('Checking Python %s... ' % name)
if context.env['target_python']:
command = (context.env['target_python'] + " $SOURCE > $TARGET")
text = "%s; print(%s)" % (imp, expr)
# TryAction returns (1, outputStr), or (0, '') on fail
(status, value) = context.TryAction(command, text, '.py')
# do not disable python because this failed
# maybe testing for newer python feature
else:
# FIXME: this ignores imp
status = 1
value = str(eval(expr))
if 1 == status:
# we could convert to str(), but caching turns it into bytes anyway
value = value.strip()
if brief is True:
context.did_show_result = 1
print("ok")
context.Result(value)
# return value
return value
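# Illustrative call, as used later for the interpreter path:
#   GetPythonValue('exe path', 'import sys', 'sys.executable')
# runs the expression under target_python and returns its stripped stdout.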
def GetLoadPath(context):
context.Message("Getting system load path... ")
cleaning = env.GetOption('clean')
helping = env.GetOption('help')
# Always set up LIBPATH so that cleaning works properly.
# FIXME: use $SRCDIR?
env.Prepend(LIBPATH=[os.path.realpath(os.curdir)])
# from scons 3.0.5, any changes to env after this, until after
# config.Finish(), will be lost. Use config.env until then.
config = Configure(env, custom_tests={
'CheckC11': CheckC11,
'CheckCompilerOption': CheckCompilerOption,
'CheckFlt_Eval_Method': CheckFlt_Eval_Method,
'CheckPKG': CheckPKG,
'CheckStrerror_r': CheckStrerror_r,
'GetPythonValue': GetPythonValue,
})
# Use print, rather than announce, so we see it in -s mode.
print("This system is: %s" % sys.platform)
libgps_flags = []
rtlibs = []
bluezflags = []
confdefs = []
dbusflags = []
adoc_prog = False
ncurseslibs = []
mathlibs = []
xtlibs = []
tiocmiwait = True # For cleaning, which works on any OS
usbflags = []
have_dia = False
# canplayer is part of can-utils, required for NMEA 2000 tests
have_canplayer = False
have_coverage = False
have_cppcheck = False
have_flake8 = False
have_pycodestyle = False
have_pylint = False
have_scan_build = False
have_smilint = False
have_tar = False
have_valgrind = False
# skip config part if cleaning or helping.
# per SCons 4.0.1 doc: Section 23.9. Not Configuring When Cleaning Targets
if not cleaning and not helping:
# OS X aliases gcc to clang
if (sys.platform != config.env['target_platform']):
announce("Target system is: %s" % config.env['target_platform'])
if 'CCVERSION' in env:
announce("cc is %s, version %s" % (env['CC'], env['CCVERSION']))
else:
# sometimes scons can not determine clang version
announce("cc is %s, WARNING version is unknown" % env['CC'])
# clang accepts -pthread, then warns it is unused.
if not config.CheckCC():
announce("ERROR: CC doesn't work")
if ((config.CheckCompilerOption("-pthread") and
not config.env['target_platform'].startswith('darwin'))):
config.env.MergeFlags("-pthread")
if config.env['target_platform'].startswith('openbsd7'):
# as of 5 Jan 23:
# scons 4.4.0 with clang 13.0.0 has trouble determining clang version.
# Then fails to add -fPIC. So we force it here:
config.env.Append(CCFLAGS=['-fPIC'])
confdefs = ["/* gpsd_config.h generated by scons, do not hand-hack. */\n"]
confdefs.append('#ifndef GPSD_CONFIG_H\n')
confdefs.append('#define VERSION "%s"' % gpsd_version)
confdefs.append('#define REVISION "%s"' % gpsd_revision)
confdefs.append('#define GPSD_PROTO_VERSION_MAJOR %u' % api_version_major)
confdefs.append('#define GPSD_PROTO_VERSION_MINOR %u' % api_version_minor)
confdefs.append('#define GPSD_URL "%s"\n' % website)
# TODO: Move these into an if block only on systems with glibc.
# needed for isfinite(), pselect(), etc.
# for strnlen() before glibc 2.10
    # glibc 2.10+ needs 200809L (or XOPEN 700+) for strnlen()
# on newer glibc _DEFAULT_SOURCE resets _POSIX_C_SOURCE
# we set it just in case
confdefs.append('#if !defined(_POSIX_C_SOURCE)')
confdefs.append('#define _POSIX_C_SOURCE 200809L')
confdefs.append('#endif\n')
# for daemon(), cfmakeraw(), strsep() and setgroups()
# on glibc 2.19+
# may also be added by pkg_config
# on linux this eventually sets _USE_XOPEN
confdefs.append('#if !defined(_DEFAULT_SOURCE)')
confdefs.append('#define _DEFAULT_SOURCE')
confdefs.append('#endif\n')
# sys/un.h, and more, needs __USE_MISC with glibc and osX
# __USE_MISC is set by _DEFAULT_SOURCE or _BSD_SOURCE
# TODO: Many of these are now specified by POSIX. Check if
# defining _XOPEN_SOURCE is necessary, and limit to systems where
# it is.
# 500 means X/Open 1995
# getsid(), isascii(), nice(), putenv(), strdup(), sys/ipc.h need 500
# 600 means X/Open 2004
# Ubuntu and OpenBSD isfinite() needs 600
# 700 means X/Open 2008
# glibc 2.10+ needs 700+ for strnlen()
# Python.h wants 600 or 700
# removed 2 Jul 2019 to see if anything breaks...
# confdefs.append('#if !defined(_XOPEN_SOURCE)')
# confdefs.append('#define _XOPEN_SOURCE 700')
# confdefs.append('#endif\n')
# Reinstated for FreeBSD (below) 16-Aug-2019
if config.env['target_platform'].startswith('linux'):
# for cfmakeraw(), strsep(), etc. on CentOS 7
# glibc 2.19 and before
# sets __USE_MISC
confdefs.append('#if !defined(_BSD_SOURCE)')
confdefs.append('#define _BSD_SOURCE')
confdefs.append('#endif\n')
# for strnlen() and struct ifreq
# glibc before 2.10, deprecated in 2.10+
confdefs.append('#if !defined(_GNU_SOURCE)')
confdefs.append('#define _GNU_SOURCE 1')
confdefs.append('#endif\n')
elif config.env['target_platform'].startswith('darwin'):
# strlcpy() and SIGWINCH need _DARWIN_C_SOURCE
confdefs.append('#if !defined(_DARWIN_C_SOURCE)')
confdefs.append('#define _DARWIN_C_SOURCE 1\n')
confdefs.append('#endif\n')
# vsnprintf() needs __DARWIN_C_LEVEL >= 200112L
# snprintf() needs __DARWIN_C_LEVEL >= 200112L
# _DARWIN_C_SOURCE forces __DARWIN_C_LEVEL to 900000L
# see <sys/cdefs.h>
# set internal lib versions at link time.
libgps_flags = ["-Wl,-current_version,%s" % libgps_version,
"-Wl,-compatibility_version,%s" % libgps_version,
"-Wl,-install_name,%s/$TARGET.srcpath" %
installdir('libdir', add_destdir=False)]
elif config.env['target_platform'].startswith('freebsd'):
# for isascii(), putenv(), nice(), strptime()
confdefs.append('#if !defined(_XOPEN_SOURCE)')
confdefs.append('#define _XOPEN_SOURCE 700')
confdefs.append('#endif\n')
# required to define u_int in sys/time.h
confdefs.append('#if !defined(_BSD_SOURCE)')
confdefs.append("#define _BSD_SOURCE 1\n")
confdefs.append('#endif\n')
# required to get strlcpy(), and more, from string.h
confdefs.append('#if !defined(__BSD_VISIBLE)')
confdefs.append("#define __BSD_VISIBLE 1\n")
confdefs.append('#endif\n')
elif config.env['target_platform'].startswith('openbsd'):
# required to define u_int in sys/time.h
confdefs.append('#if !defined(_BSD_SOURCE)')
confdefs.append("#define _BSD_SOURCE 1\n")
confdefs.append('#endif\n')
# required to get strlcpy(), and more, from string.h
confdefs.append('#if !defined(__BSD_VISIBLE)')
confdefs.append("#define __BSD_VISIBLE 1\n")
confdefs.append('#endif\n')
elif config.env['target_platform'].startswith('netbsd'):
# required to get strlcpy(), and more, from string.h
confdefs.append('#if !defined(_NETBSD_SOURCE)')
confdefs.append("#define _NETBSD_SOURCE 1\n")
confdefs.append('#endif\n')
elif config.env['target_platform'].startswith('sunos5'):
# tested with gcc-5.5 on slowlaris 10
# required to get isascii(), and more, from ctype.h
confdefs.append('#if !defined(__XPG4_CHAR_CLASS__)')
confdefs.append("#define __XPG4_CHAR_CLASS__ 1\n")
confdefs.append('#endif\n')
        confdefs.append('#if !defined(_XPG6)')
        confdefs.append('#define _XPG6\n')
confdefs.append('#endif\n')
        # for things like strlcat(), strlcpy()
confdefs.append('#if !defined(__EXTENSIONS__)')
confdefs.append('#define __EXTENSIONS__\n')
confdefs.append('#endif\n')
cxx = config.CheckCXX()
if not cxx:
announce("C++ doesn't work, suppressing libgpsmm and Qt build.")
config.env["libgpsmm"] = False
config.env["qt"] = False
# define a helper function for pkg-config - we need to pass
# --static for static linking, too.
#
# Using "--libs-only-L --libs-only-l" instead of "--libs" avoids
# a superfluous "-rpath" option in some FreeBSD cases, and the resulting
# scons crash.
# However, it produces incorrect results for Qt5Network in OSX, so
# it can't be used unconditionally.
def pkg_config(pkg, shared=env['shared'], rpath_hack=False):
libs = '--libs-only-L --libs-only-l' if rpath_hack else '--libs'
if not shared:
libs += ' --static'
return ['!%s --cflags %s %s' % (env['PKG_CONFIG'], libs, pkg)]
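    # e.g. pkg_config('dbus-1') expands to
    #   ['!pkg-config --cflags --libs dbus-1']
    # the leading '!' tells scons's ParseFlags/MergeFlags to execute the
    # command and parse its output.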
# The actual distinction here is whether the platform has ncurses in the
# base system or not. If it does, pkg-config is not likely to tell us
# anything useful. FreeBSD does, Linux doesn't. Most likely other BSDs
# are like FreeBSD.
ncurseslibs = []
if config.env['ncurses']:
if not config.CheckHeader(["curses.h"]):
announce('Turning off ncurses support, curses.h not found.')
config.env['ncurses'] = False
elif config.CheckPKG('ncurses'):
ncurseslibs = pkg_config('ncurses', rpath_hack=True)
if config.CheckPKG('tinfo'):
ncurseslibs += pkg_config('tinfo', rpath_hack=True)
elif config.CheckPKG('ncursesw'):
# One distro in 2022, Void, only ships the ncursesw
# part of ncurses.
ncurseslibs = pkg_config('ncursesw', rpath_hack=True)
if config.CheckPKG('tinfo'):
ncurseslibs += pkg_config('tinfo', rpath_hack=True)
# It's not yet known whether rpath_hack is appropriate for
# ncurses5-config.
elif WhereIs('ncurses5-config'):
ncurseslibs = ['!ncurses5-config --libs --cflags']
elif WhereIs('ncursesw5-config'):
ncurseslibs = ['!ncursesw5-config --libs --cflags']
elif config.env['target_platform'].startswith('freebsd'):
ncurseslibs = ['-lncurses']
elif (config.env['target_platform'].startswith('darwin') or
config.env['target_platform'].startswith('openbsd') or
config.env['target_platform'].startswith('sunos5')):
ncurseslibs = ['-lcurses']
else:
announce('Turning off ncurses support, library not found.')
config.env['ncurses'] = False
if config.env['usb']:
        # On FreeBSD (except version 7), USB libraries are in the base system
if config.CheckPKG('libusb-1.0'):
confdefs.append("#define HAVE_LIBUSB 1\n")
try:
usbflags = pkg_config('libusb-1.0')
except OSError:
announce("pkg_config is confused about the state "
"of libusb-1.0.")
usbflags = []
elif config.env['target_platform'].startswith('freebsd'):
            # FIXME: should directly test for libusb existence.
confdefs.append("#define HAVE_LIBUSB 1\n")
usbflags = ["-lusb"]
else:
confdefs.append("/* #undef HAVE_LIBUSB */\n")
usbflags = []
else:
confdefs.append("/* #undef HAVE_LIBUSB */\n")
usbflags = []
config.env["usb"] = False
if config.CheckLib('librt'):
confdefs.append("#define HAVE_LIBRT 1\n")
# System library - no special flags
rtlibs = ["-lrt"]
else:
confdefs.append("/* #undef HAVE_LIBRT */\n")
# for slowlaris socket(), bind(), etc.
if config.CheckLib('libnsl'):
confdefs.append("#define HAVE_LIBNSL\n")
# System library - no special flags
rtlibs += ["-lnsl"]
else:
confdefs.append("/* #undef HAVE_LIBNSL */\n")
# for slowlaris socket(), bind(), etc.
if config.CheckLib('libsocket'):
confdefs.append("#define HAVE_LIBSOCKET\n")
# System library - no special flags
rtlibs += ["-lsocket"]
else:
confdefs.append("/* #undef HAVE_LIBNSOCKET */\n")
# The main reason we check for libm explicitly is to set up the config
# environment for CheckFunc for sincos(). But it doesn't hurt to omit
# the '-lm' when it isn't appropriate.
if config.CheckLib('libm'):
mathlibs = ['-lm']
else:
mathlibs = []
# FreeBSD uses -lthr for pthreads
if config.CheckLib('libthr'):
confdefs.append("#define HAVE_LIBTHR 1\n")
# System library - no special flags
rtlibs += ["-lthr"]
else:
confdefs.append("/* #undef HAVE_LIBTHR */\n")
if config.env['dbus_export'] and config.CheckPKG('dbus-1'):
confdefs.append("#define HAVE_DBUS 1\n")
dbusflags = pkg_config("dbus-1")
config.env.MergeFlags(dbusflags)
else:
confdefs.append("/* #undef HAVE_DBUS */\n")
dbusflags = []
if config.env["dbus_export"]:
announce("Turning off dbus-export support, library not found.")
config.env["dbus_export"] = False
if config.env['bluez'] and config.CheckPKG('bluez'):
confdefs.append("#define ENABLE_BLUEZ 1\n")
bluezflags = pkg_config('bluez')
else:
confdefs.append("/* #undef ENABLE_BLUEZ */\n")
bluezflags = []
if config.env["bluez"]:
announce("Turning off Bluetooth support, library not found.")
config.env["bluez"] = False
# in_port_t is not defined on Android
if not config.CheckType("in_port_t", "#include <netinet/in.h>"):
announce("Did not find in_port_t typedef, assuming unsigned short int")
confdefs.append("typedef unsigned short int in_port_t;\n")
# SUN_LEN is not defined on Android
if ((not config.CheckDeclaration("SUN_LEN", "#include <sys/un.h>") and
not config.CheckDeclaration("SUN_LEN", "#include <linux/un.h>"))):
announce("SUN_LEN is not system-defined, using local definition")
confdefs.append("#ifndef SUN_LEN\n")
confdefs.append("#define SUN_LEN(ptr) "
"((size_t) (((struct sockaddr_un *) 0)->sun_path) "
"+ strlen((ptr)->sun_path))\n")
confdefs.append("#endif /* SUN_LEN */\n")
if config.CheckHeader(["linux/can.h"]):
confdefs.append("#define HAVE_LINUX_CAN_H 1\n")
announce("You have kernel CANbus available.")
else:
confdefs.append("/* #undef HAVE_LINUX_CAN_H */\n")
announce("You do not have kernel CANbus available.")
config.env["nmea2000"] = False
    # check for C11 or better, and that __STDC_NO_ATOMICS__ is not defined,
    # before looking for stdatomic.h
if ((config.CheckC11() and
not config.CheckDeclaration("__STDC_NO_ATOMICS__") and
config.CheckHeader("stdatomic.h"))):
confdefs.append("#define HAVE_STDATOMIC_H 1\n")
else:
confdefs.append("/* #undef HAVE_STDATOMIC_H */\n")
if config.CheckHeader("libkern/OSAtomic.h"):
confdefs.append("#define HAVE_OSATOMIC_H 1\n")
else:
confdefs.append("/* #undef HAVE_OSATOMIC_H */\n")
announce("No memory barriers - SHM export and time hinting "
"may not be reliable.")
# endian.h is required for rtcm104v2 unless the compiler defines
# __ORDER_BIG_ENDIAN__, __ORDER_LITTLE_ENDIAN__ and __BYTE_ORDER__
if ((config.CheckDeclaration("__ORDER_BIG_ENDIAN__") and
config.CheckDeclaration("__ORDER_LITTLE_ENDIAN__") and
config.CheckDeclaration("__BYTE_ORDER__"))):
confdefs.append("#define HAVE_BUILTIN_ENDIANNESS 1\n")
confdefs.append("/* #undef HAVE_ENDIAN_H */\n")
confdefs.append("/* #undef HAVE_SYS_ENDIAN_H */\n")
announce("Your compiler has built-in endianness support.")
else:
confdefs.append("/* #undef HAVE_BUILTIN_ENDIANNESS\n */")
if config.CheckHeader("endian.h"):
confdefs.append("#define HAVE_ENDIAN_H 1\n")
confdefs.append("/* #undef HAVE_SYS_ENDIAN_H */\n")
confdefs.append("/* #undef HAVE_MACHINE_ENDIAN_H */\n")
elif config.CheckHeader("sys/endian.h"):
confdefs.append("/* #undef HAVE_ENDIAN_H */\n")
confdefs.append("#define HAVE_SYS_ENDIAN_H 1\n")
confdefs.append("/* #undef HAVE_MACHINE_ENDIAN_H */\n")
elif config.CheckHeader("machine/endian.h"):
confdefs.append("/* #undef HAVE_ENDIAN_H */\n")
confdefs.append("/* #undef HAVE_SYS_ENDIAN_H */\n")
confdefs.append("#define HAVE_MACHINE_ENDIAN_H 1\n")
else:
confdefs.append("/* #undef HAVE_ENDIAN_H */\n")
confdefs.append("/* #undef HAVE_SYS_ENDIAN_H */\n")
confdefs.append("/* #undef HAVE_MACHINE_ENDIAN_H */\n")
announce("You do not have the endian.h header file. "
"RTCM V2 support disabled.")
config.env["rtcm104v2"] = False
for hdr in ("arpa/inet",
"linux/serial", # for serial_icounter_struct
"netdb",
"netinet/in",
"netinet/ip",
"sys/sysmacros", # for major(), on linux
"sys/socket",
"sys/un",
"syslog",
"termios",
"winsock2"
):
if config.CheckHeader(hdr + ".h"):
confdefs.append("#define HAVE_%s_H 1\n"
% hdr.replace("/", "_").upper())
elif "termios" == hdr:
announce("ERROR: %s.h not found" % hdr)
else:
confdefs.append("/* #undef HAVE_%s_H */\n"
% hdr.replace("/", "_").upper())
if not config.CheckFlt_Eval_Method():
announce("WARNING: FLT_EVAL_METHOD is not 0")
if config.CheckStrerror_r():
# POSIX behavior
confdefs.append("#define STRERROR_R_INT\n")
else:
# glibc behavior
confdefs.append("#define STRERROR_R_STR\n")
# check for 64 bit time_t. Needed for 2038.
sizeof_time_t = config.CheckTypeSize("time_t", "#include <time.h>",
expect=8)
if 0 == sizeof_time_t:
# see if we can force time64_t
# this needs glibc 2.34 or later, and a compatible kernel
sizeof_time_t = config.CheckTypeSize("time_t",
"#define _TIME_BITS 64\n"
"#define _FILE_OFFSET_BITS 64\n"
"#include <time.h>",
expect=8)
if 0 != sizeof_time_t:
# force time64_t
confdefs.append("// Forcing 64-bit time_t\n"
"#define _TIME_BITS 64\n"
"#define _FILE_OFFSET_BITS 64\n")
if 0 == sizeof_time_t:
announce("WARNING: time_t is too small. It will fail in 2038")
sizeof_time_t = 4
else:
sizeof_time_t = 8
confdefs.append("#define SIZEOF_TIME_T %s\n" % sizeof_time_t)
    # check functions after libraries, because some functions require
    # libraries; for example clock_gettime() requires librt on Linux
    # glibc < 2.17
for f in ("cfmakeraw", "clock_gettime", "daemon", "fcntl", "fork",
"getopt_long",
"gmtime_r", "inet_ntop", "strlcat", "strlcpy", "strnlen",
"strptime"):
if config.CheckFunc(f):
confdefs.append("#define HAVE_%s 1\n" % f.upper())
else:
confdefs.append("/* #undef HAVE_%s */\n" % f.upper())
# used to check for sincos(), but making that work with -Werror did not work.
if config.CheckHeader(["sys/types.h", "sys/time.h", "sys/timepps.h"]):
confdefs.append("#define HAVE_SYS_TIMEPPS_H 1\n")
kpps = True
else:
kpps = False
if config.env["magic_hat"]:
announce("Forcing magic_hat=no since RFC2783 API is unavailable")
config.env["magic_hat"] = False
tiocmiwait = config.CheckDeclaration("TIOCMIWAIT",
"#include <sys/ioctl.h>")
if not tiocmiwait and not kpps:
announce("WARNING: Neither TIOCMIWAIT (PPS) nor RFC2783 API (KPPS) "
"is available.", end=True)
if config.env["timeservice"]:
announce("ERROR: timeservice specified, but no PPS available")
Exit(1)
# Map options to libraries required to support them that might be absent.
optionrequires = {
"bluez": ["libbluetooth"],
"dbus_export": ["libdbus-1"],
}
keys = list(map(lambda x: (x[0], x[2]), boolopts)) \
+ list(map(lambda x: (x[0], x[2]), nonboolopts)) \
+ list(map(lambda x: (x[0], x[2]), pathopts))
keys.sort()
for (key, helpd) in keys:
value = config.env[key]
if value and key in optionrequires:
for required in optionrequires[key]:
if not config.CheckLib(required):
announce("%s not found, %s cannot be enabled."
% (required, key))
value = False
break
confdefs.append("/* %s */" % helpd)
if isinstance(value, bool):
if value:
confdefs.append("#define %s_ENABLE 1\n" % key.upper())
else:
confdefs.append("/* #undef %s_ENABLE */\n" % key.upper())
elif value in (0, "", "(undefined)"):
confdefs.append("/* #undef %s */\n" % key.upper())
else:
if value.isdigit():
confdefs.append("#define %s %s\n" % (key.upper(), value))
else:
confdefs.append("#define %s \"%s\"\n" % (key.upper(), value))
# Simplifies life on hackerboards like the Raspberry Pi
if config.env['magic_hat']:
confdefs.append('''\
/* Magic device which, if present, means to grab a static /dev/pps0 for KPPS */
#define MAGIC_HAT_GPS "/dev/ttyAMA0"
/* Generic device which, if present, means: */
/* to grab a static /dev/pps0 for KPPS */
#define MAGIC_LINK_GPS "/dev/gpsd0"
''')
confdefs.append('''\
#define GPSD_CONFIG_H
#endif /* GPSD_CONFIG_H */
''')
# handle manbuild = no/auto/yes
# do we have asciidoctor, perhaps versioned?
adoc_prog = env.WhereIs('asciidoctor')
if (not adoc_prog):
adoc_prog = env.WhereIs('asciidoctor31')
if (not adoc_prog):
adoc_prog = env.WhereIs('asciidoctor30')
if (not adoc_prog):
adoc_prog = env.WhereIs('asciidoctor27')
config.env['manbuild'] = config.env['manbuild'].lower()
if ((not config.env['manbuild'] or
'auto' == config.env['manbuild'])):
if adoc_prog:
config.env['manbuild'] = 1
announce("Build of man and HTML documentation enabled.")
else:
config.env['manbuild'] = 0
announce("WARNING: AsciiDoctor not found.\n"
"WARNING: Some documentation and html will not be built.",
end=True)
else:
try:
config.env['manbuild'] = strtobool(
config.env['manbuild'])
except ValueError:
announce("ERROR: manbuild must be no/auto/yes.")
sys.exit(1)
if 0 == config.env['manbuild']:
adoc_prog = None
announce("Build of man and HTML documentation disabled.")
elif 1 == config.env['manbuild'] and not adoc_prog:
announce("ERROR: manbuild=True, but AsciiDoctor not found.\n")
sys.exit(1)
else:
announce("Build of man and HTML documentation enabled.")
# end handle manbuild = no/auto/yes
# Determine if Qt network libraries are present, and
# if not, force qt to off
if config.env["qt"]:
qt_net_name = 'Qt%sNetwork' % config.env["qt_versioned"]
qt_network = config.CheckPKG(qt_net_name)
if not qt_network:
config.env["qt"] = False
announce('Turning off Qt support, library not found.')
# If supported by the compiler, enable all warnings except uninitialized
# and missing-field-initializers, which we can't help triggering because
# of the way some of the JSON-parsing code is generated.
#
    # Some flags work for cc, but not c++; add those here, and to the
    # c_only list below
#
# Do this after the other config checks, to keep warnings out of them.
for option in (
# -Wall and Wextra first as they modify later options
'-Wall',
'-Wextra',
# clang: ask for C Annex F standard floating point
'--disable-excess-fp-precision',
# gcc: ask for C Annex F standard floating point
'-fexcess-precision=standard',
'-Wcast-align',
'-Wcast-qual',
# -Wimplicit-fallthrough same as
# -Wimplicit-fallthrough=3, except osX hates the
# second flavor
'-Wimplicit-fallthrough',
# '-Wimplicit-function-declaration', # someday, annoys C++
'-Wmissing-declarations',
'-Wmissing-prototypes',
'-Wno-missing-field-initializers',
'-Wno-uninitialized',
'-Wpointer-arith',
'-Wreturn-type',
'-Wstrict-prototypes',
'-Wundef',
'-Wvla',
):
if option not in config.env['CFLAGS']:
config.CheckCompilerOption(option)
# check for misc audit programs
try:
have_canplayer = config.CheckProg('canplayer')
have_coverage = config.CheckProg('coverage')
have_cppcheck = config.CheckProg('cppcheck')
have_dia = config.CheckProg('dia')
have_flake8 = config.CheckProg('flake8')
have_pycodestyle = config.CheckProg('pycodestyle')
have_pylint = config.CheckProg('pylint')
have_scan_build = config.CheckProg('scan-build')
# smilint is part of libsmi package
have_smilint = config.CheckProg('smilint')
have_tar = config.CheckProg(env['TAR'])
have_valgrind = config.CheckProg('valgrind')
except AttributeError:
# scons versions before Sep 2015 (2.4.0) don't have CheckProg
# gpsd only asks for 2.3.0 or higher
announce("scons CheckProg() failed..")
if not have_canplayer:
announce("Program canplayer not found -- skipping NMEA 2000 tests")
if not have_coverage:
announce("Program coverage not found -- skipping Python coverage")
if not have_cppcheck:
announce("Program cppcheck not found -- skipping cppcheck checks")
if not have_dia:
announce("Program dia not found -- not rebuiding cycle.svg.")
if not have_flake8:
announce("Program flake8 not found -- skipping flake8 checks")
if not have_pycodestyle:
announce("Program pycodestyle not found -- "
"skipping pycodestyle checks")
if not have_pylint:
announce("Program pylint not found -- skipping pylint checks")
if not have_scan_build:
announce("Program scan-build not found -- skipping scan-build checks")
if not have_smilint:
announce("Program smilint not found -- skipping MIB checks")
if not have_tar:
announce('WARNING: %s not found. Can not build tar files.' %
env['TAR'])
if not have_valgrind:
announce("Program valgrind not found -- skipping valgrind checks")
# Set up configuration for target Python
PYTHON_LIBDIR_CALL = 'sysconfig.get_python_lib()'
PYTHON_CONFIG_NAMES = ['SO'] # Now a fairly degenerate list
PYTHON_CONFIG_QUOTED = ["'%s'" % s for s in PYTHON_CONFIG_NAMES]
PYTHON_CONFIG_CALL = ('sysconfig.get_config_vars(%s)'
% ', '.join(PYTHON_CONFIG_QUOTED))
python_config = {} # Dummy for all non-Python-build cases
target_python_path = ''
py_config_text = str(eval(PYTHON_CONFIG_CALL))
python_libdir = str(eval(PYTHON_LIBDIR_CALL))
# flag if we have xgps* dependencies, so xgps* should run OK
config.env['xgps_deps'] = False
if not cleaning and not helping and config.env['python']:
if config.env['target_python']:
try:
config.CheckProg
except AttributeError:
# scons versions before Nov 2015 (2.4.1) don't have CheckProg
# gpsd only asks for 2.3.0 or higher
target_python_path = config.env['target_python']
else:
target_python_path = config.CheckProg(config.env['target_python'])
if ((not target_python_path and
'python' == config.env['target_python'])):
# some distros don't install a python target, only python3
announce("Target Python '%s' doesn't exist. "
"Trying 'python3'." %
config.env['target_python'])
config.env['target_python'] = 'python3'
python_shebang = "/usr/bin/env %s" % def_target_python
try:
config.CheckProg
except AttributeError:
# FIXME: duplicates code above
# scons versions before Nov 2015 (2.4.1) don't
# have CheckProg # gpsd only asks for 2.3.0 or higher
target_python_path = config.env['target_python']
else:
target_python_path = config.CheckProg(
config.env['target_python'])
if not target_python_path:
announce("Target Python '%s' doesn't exist. Disabling Python." %
config.env['target_python'])
announce("Use the target_python=XX configuration option if you "
"have a working python target.")
config.env['python'] = False
if config.env['python']:
if not target_python_path:
# Avoid double testing for target_python_path
# Maximize consistency by using the reported sys.executable
target_python_path = config.GetPythonValue('exe path',
'import sys',
'sys.executable')
target_python_path = polystr(target_python_path)
# python module directory
if config.env['python_libdir']:
python_libdir = config.env['python_libdir']
else:
python_libdir = config.GetPythonValue('lib dir',
PYTHON_SYSCONFIG_IMPORT,
PYTHON_LIBDIR_CALL)
# follow FHS, put in /usr/local/libXX, not /usr/libXX
# may be lib, lib32 or lib64
python_libdir = polystr(python_libdir)
python_libdir = python_libdir.replace("/usr/lib",
"/usr/local/lib")
python_module_dir = str(python_libdir) + os.sep
# Many systems can have a problem with the Python path
if 'PYTHONPATH' in os.environ:
announce("System PYTHONPATH='%s'" % os.environ['PYTHONPATH'])
else:
announce("System PYTHONPATH is empty")
announce("Ensure your PYTHONPATH includes %s" % python_module_dir,
end=True)
python_module_dir += 'gps'
py_config_text = config.GetPythonValue('config vars',
PYTHON_SYSCONFIG_IMPORT,
PYTHON_CONFIG_CALL,
brief=True)
py_config_text = polystr(py_config_text)
py_config_vars = ast.literal_eval(py_config_text)
py_config_vars = [[] if x is None else x for x in py_config_vars]
python_config = dict(zip(PYTHON_CONFIG_NAMES, py_config_vars))
# debug
# announce(python_config)
# aiogps is only available on Python >= 3.6
sysver = config.GetPythonValue('target version',
'import sys',
'"%d.%d" % sys.version_info[0:2]')
if tuple(map(int, sysver.split("."))) < (3, 6):
config.env['aiogps'] = False
announce("WARNING: Python%s too old (need 3.6): "
"gps/aiogps.py will not be installed" %
(sysver), end=True)
else:
config.env['aiogps'] = True
# check for pyserial
if not config.GetPythonValue('module serial (pyserial)',
'import serial', '"found"'):
# no pyserial, used by ubxtool and zerk
announce("WARNING: ubxtool and zerk are missing optional "
"runtime module serial", end=True)
config.env['xgps_deps'] = True
# check for pycairo
if not config.GetPythonValue('module cairo (pycairo)',
'import cairo', '"found"'):
# no pycairo, used by xgps, xgpsspeed
config.env['xgps_deps'] = False
announce("WARNING: Python module cairo (pycairo) not found.")
# check for pygobject
if not config.GetPythonValue('module gi (pygobject)',
'import gi', '"found"'):
# no pygobject, used by xgps, xgpsspeed
config.env['xgps_deps'] = False
announce("WARNING: Python module gi (pygobject) not found.")
# gtk+ needed by pygobject
if not config.CheckPKG('gtk+-3.0'):
config.env['xgps_deps'] = False
announce("WARNING: gtk+-3.0 not found.")
if not config.env['xgps_deps']:
announce("WARNING: xgps and xgpsspeed are missing runtime "
"dependencies", end=True)
if not env['xgps']:
# xgps* turned off by option
config.env['xgps_deps'] = False
# check for matplotlib
if not config.GetPythonValue('module matplotlib',
'import matplotlib', '"found"'):
# no matplotlib, used by gpsplot
announce("WARNING: gpsplot is missing required "
"runtime module matplotlib", end=True)
config.env['PYTHON'] = target_python_path
# For regress-driver
config.env['ENV']['PYTHON'] = target_python_path
# get a list of the files from git, so they can go in distribution zip/tar
distfiles = config.TryAction("git ls-files > $TARGET")[1]
distfiles = polystr(distfiles).split()
    # add in the built man pages; zip and tar files must contain man pages.
distfiles += all_manpages.keys()
env = config.Finish()
# All configuration should be finished. env can now be modified.
# NO CONFIG TESTS AFTER THIS POINT!
qt_env = None
if not (cleaning or helping):
# Be explicit about what we're doing.
changelatch = False
for (name, default, helpd) in boolopts + nonboolopts + pathopts:
if env[name] != env.subst(default):
if not changelatch:
announce("Altered configuration variables:")
changelatch = True
announce("%s = %s (default %s): %s"
% (name, env[name], env.subst(default), helpd))
if not changelatch:
announce("All configuration flags are defaulted.")
# Should we build the Qt binding?
if env["qt"] and env["shared"]:
qt_env = env.Clone()
qt_env.MergeFlags('-DUSE_QT')
qt_env.Append(OBJPREFIX='qt-')
try:
qt_env.MergeFlags(pkg_config(qt_net_name))
except OSError:
announce("pkg_config is confused about the state of %s."
% qt_net_name)
qt_env = None
# Set up for Python coveraging if needed
pycov_path = None
if have_coverage and env['coveraging'] and env['python_coverage']:
pycov_default = opts.options[opts.keys().index('python_coverage')].default
pycov_current = env['python_coverage']
pycov_list = pycov_current.split()
if env.GetOption('num_jobs') > 1 and pycov_current == pycov_default:
pycov_list.append('--parallel-mode')
# May need absolute path to coveraging tool if 'PythonXX' is prefixed
pycov_path = env.WhereIs(pycov_list[0])
if pycov_path:
pycov_list[0] = pycov_path
env['PYTHON_COVERAGE'] = ' '.join(pycov_list)
env['ENV']['PYTHON_COVERAGE'] = ' '.join(pycov_list)
else:
env['python_coverage'] = '' # So we see it in the options
# Two shared libraries provide most of the code for the C programs
# gpsd client library
libgps_sources = [
"libgps/ais_json.c",
"libgps/bits.c",
"libgps/gpsdclient.c",
"libgps/gps_maskdump.c", # generated
"libgps/gpsutils.c",
"libgps/hex.c",
"libgps/json.c",
"libgps/libgps_core.c",
"libgps/libgps_dbus.c",
"libgps/libgps_json.c",
"libgps/libgps_shm.c",
"libgps/libgps_sock.c",
"libgps/netlib.c",
"libgps/ntpshmread.c",
"libgps/os_compat.c",
"libgps/rtcm2_json.c",
"libgps/rtcm3_json.c",
"libgps/shared_json.c",
"libgps/timespec_str.c",
]
# Client sources not to be built as C++ when building the Qt library.
libgps_c_only = set([
"libgps/ais_json.c",
"libgps/json.c",
"libgps/libgps_json.c",
"libgps/os_compat.c",
"libgps/rtcm2_json.c",
"libgps/rtcm3_json.c",
"libgps/shared_json.c",
"libgps/timespec_str.c",
])
if env['libgpsmm']:
libgps_sources.append("libgps/libgpsmm.cpp")
# gpsd server library
libgpsd_sources = [
"gpsd/bsd_base64.c",
"gpsd/crc24q.c",
"drivers/driver_ais.c",
"drivers/driver_evermore.c",
"drivers/driver_garmin.c",
"drivers/driver_garmin_txt.c",
"drivers/driver_geostar.c",
"drivers/driver_greis.c",
"drivers/driver_greis_checksum.c",
"drivers/driver_italk.c",
"drivers/driver_navcom.c",
"drivers/driver_nmea0183.c",
"drivers/driver_nmea2000.c",
"drivers/driver_oncore.c",
"drivers/driver_rtcm2.c",
"drivers/driver_rtcm3.c",
"drivers/drivers.c",
"drivers/driver_sirf.c",
"drivers/driver_skytraq.c",
"drivers/driver_superstar2.c",
"drivers/driver_tsip.c",
"drivers/driver_ubx.c",
"drivers/driver_zodiac.c",
"gpsd/geoid.c",
"gpsd/gpsd_json.c",
"gpsd/isgps.c",
"gpsd/libgpsd_core.c",
"gpsd/matrix.c",
"gpsd/net_dgpsip.c",
"gpsd/net_gnss_dispatch.c",
"gpsd/net_ntrip.c",
"gpsd/ntpshmwrite.c",
"gpsd/packet.c",
"gpsd/ppsthread.c",
"gpsd/pseudoais.c",
"gpsd/pseudonmea.c",
"gpsd/serial.c",
"gpsd/subframe.c",
"gpsd/timebase.c",
]
# Build ffi binding
#
packet_ffi_extension = [
"gpsd/crc24q.c",
"drivers/driver_greis_checksum.c",
"drivers/driver_rtcm2.c",
"libgps/gpspacket.c",
"gpsd/isgps.c",
"libgps/hex.c",
"libgps/os_compat.c",
"gpsd/packet.c",
]
if env["shared"]:
def GPSLibrary(env, target, source, version, parse_flags=None):
# Note: We have a possibility of getting either Object or file
# list for sources, so we run through the sources and try to make
# them into SharedObject instances.
obj_list = []
for s in Flatten(source):
if isinstance(s, str):
obj_list.append(env.SharedObject(s))
else:
obj_list.append(s)
return env.SharedLibrary(target=target,
source=obj_list,
parse_flags=parse_flags,
SHLIBVERSION=version)
def GPSLibraryInstall(env, libdir, source, version):
# note: osX lib name s/b libgps.VV.dylib
# where VV is libgps_version_current
inst = env.InstallVersionedLib(libdir, source, SHLIBVERSION=version)
return inst
else:
def GPSLibrary(env, target, source, version, parse_flags=None):
return env.StaticLibrary(target,
[env.StaticObject(s) for s in source],
parse_flags=parse_flags)
def GPSLibraryInstall(env, libdir, source, version):
return env.Install(libdir, source)
libgps_shared = GPSLibrary(env=env,
target="gps",
source=libgps_sources,
version=libgps_version,
parse_flags=rtlibs + libgps_flags)
libgps_static = env.StaticLibrary(
target="gps_static",
source=[env.StaticObject(s) for s in libgps_sources],
parse_flags=rtlibs)
libgpsd_static = env.StaticLibrary(
target="gpsd",
source=[env.StaticObject(s, parse_flags=usbflags + bluezflags)
for s in libgpsd_sources],
parse_flags=usbflags + bluezflags)
# FFI library must always be shared, even with shared=no.
packet_ffi_objects = [env.SharedObject(s) for s in packet_ffi_extension]
packet_ffi_shared = env.SharedLibrary(target="gpsdpacket",
source=packet_ffi_objects,
SHLIBVERSION=libgps_version,
parse_flags=rtlibs + libgps_flags)
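# The Python gps.packet module loads this library at runtime (see the
# Depends on it below), which is why it must be shared even when shared=no.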
libraries = [libgps_shared, packet_ffi_shared]
# Only attempt to create the Qt library if shared is turned on;
# otherwise we get a mishmash of objects in the library
if qt_env:
qtobjects = []
qt_flags = qt_env['CFLAGS']
for c_only in (
'--disable-excess-fp-precision',
'-fexcess-precision=standard',
'-Wmissing-prototypes',
'-Wstrict-prototypes',
'-Wmissing-declarations'
):
if c_only in qt_flags:
qt_flags.remove(c_only)
# Qt binding object files have to be renamed as they're built to avoid
# name clashes with the plain non-Qt object files. This prevents the
# infamous "Two environments with different actions were specified
# for the same target" error.
for src in libgps_sources:
if src not in libgps_c_only:
compile_with = qt_env['CXX']
compile_flags = qt_flags
else:
compile_with = qt_env['CC']
compile_flags = qt_env['CFLAGS']
qtobjects.append(qt_env.SharedObject(src,
CC=compile_with,
CFLAGS=compile_flags))
compiled_qgpsmmlib = GPSLibrary(env=qt_env,
target="Qgpsmm",
source=qtobjects,
version=libgps_version,
parse_flags=libgps_flags)
libraries.append(compiled_qgpsmmlib)
# The libraries have dependencies on system libraries
# libdbus appears multiple times because the linker only does one pass.
gpsflags = mathlibs + rtlibs + dbusflags
gpsdflags = usbflags + bluezflags + gpsflags
# Source groups
gpsd_sources = [
'gpsd/dbusexport.c',
'gpsd/gpsd.c',
'gpsd/shmexport.c',
'gpsd/timehint.c'
]
if env['systemd']:
gpsd_sources.append("gpsd/sd_socket.c")
gpsmon_sources = [
'gpsmon/gpsmon.c',
'gpsmon/monitor_garmin.c',
'gpsmon/monitor_italk.c',
'gpsmon/monitor_nmea0183.c',
'gpsmon/monitor_oncore.c',
'gpsmon/monitor_sirf.c',
'gpsmon/monitor_superstar2.c',
'gpsmon/monitor_tnt.c',
'gpsmon/monitor_ubx.c',
]
# Python dependencies
# For generated dependencies, this causes them to be generated as needed.
# For non-generated dependencies, it causes them to be duplicated into
# the build tree as needed.
# Symlink creator for uplevel access to the 'gps' package
def PylibLink(target, source, env):
_ = source, env
os.symlink('../gps', target[0].get_path())
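# e.g. env.Command('clients/gps', '', PylibLink) below creates the symlink
# clients/gps -> ../gps so uninstalled clients can 'import gps'.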
# All installed python programs
# All are templated
python_progs = [
"clients/gegps",
"clients/gpscat",
"clients/gpscsv",
"clients/gpsplot",
"clients/gpsprof",
"clients/gpssubframe",
"clients/ubxtool",
"clients/zerk",
"gpsfake",
]
if env['xgps']:
python_progs.append("clients/xgps")
python_progs.append("clients/xgpsspeed")
# Import dependencies
# Update these whenever the imports change
# Internal imports within 'gps' package
env.Depends('gps/__init__.py', ['gps/gps.py', 'gps/misc.py'])
env.Depends('gps/aiogps.py', ['gps/client.py', 'gps/gps.py', 'gps/misc.py'])
env.Depends('gps/client.py', ['gps/misc.py', 'gps/watch_options.py'])
env.Depends('gps/gps.py',
['gps/client.py', 'gps/misc.py', 'gps/watch_options.py'])
env.Depends('gps/fake.py', 'gps/packet.py')
env.Depends('gps/packet.py', 'gps/misc.py')
# All Python programs import the 'gps' package
env.Depends(python_progs, 'gps/__init__.py')
# Additional specific import cases
env.Depends('clients/gpscat', ['gps/packet.py', 'gps/misc.py'])
env.Depends('clients/gpsplot', 'gps/clienthelpers.py')
env.Depends('clients/gpsprof', 'gps/clienthelpers.py')
env.Depends('clients/ubxtool', 'gps/ubx.py')
env.Depends('clients/xgps', 'gps/clienthelpers.py')
env.Depends('clients/xgpsspeed', 'gps/clienthelpers.py')
env.Depends('clients/zerk', 'gps/misc.py')
env.Depends('gpsfake', ['gps/fake.py', 'gps/misc.py'])
# Symlink for the clients to find the 'gps' package in the build tree
env.Depends(python_progs, env.Command('clients/gps', '', PylibLink))
# Non-import dependencies
# Dependency on program
env.Depends('regress-driver', 'gpsfake')
# Dependency on FFI packet library
env.Depends('gps/packet.py', packet_ffi_shared)
# Production programs
cgps = env.Program('clients/cgps', ['clients/cgps.c'],
LIBS=[libgps_static],
parse_flags=gpsflags + ncurseslibs)
gps2udp = env.Program('clients/gps2udp', ['clients/gps2udp.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
gpsctl = env.Program('gpsctl', ['gpsctl.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags + gpsflags)
gpsd = env.Program('gpsd/gpsd', gpsd_sources,
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags + gpsflags)
gpsdctl = env.Program('clients/gpsdctl', ['clients/gpsdctl.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
gpsdecode = env.Program('clients/gpsdecode', ['clients/gpsdecode.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags + gpsflags)
# FIXME: gpsmon should not link to gpsd server sources!
gpsmon = env.Program('gpsmon/gpsmon', gpsmon_sources,
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags + gpsflags + ncurseslibs)
gpspipe = env.Program('clients/gpspipe', ['clients/gpspipe.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
gpsrinex = env.Program('clients/gpsrinex', ['clients/gpsrinex.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
gpssnmp = env.Program('clients/gpssnmp', ['clients/gpssnmp.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
gpxlogger = env.Program('clients/gpxlogger', ['clients/gpxlogger.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
lcdgps = env.Program('clients/lcdgps', ['clients/lcdgps.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
ntpshmmon = env.Program('clients/ntpshmmon', ['clients/ntpshmmon.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
ppscheck = env.Program('clients/ppscheck', ['clients/ppscheck.c'],
LIBS=[libgps_static],
parse_flags=gpsflags)
bin_binaries = []
bin_scripts = []
sbin_binaries = []
if env["gpsd"]:
sbin_binaries += [gpsd]
if env["gpsdclients"]:
sbin_binaries += [gpsdctl]
bin_binaries += [
gps2udp,
gpsctl,
gpsdecode,
gpspipe,
gpsrinex,
gpssnmp,
gpxlogger,
lcdgps
]
bin_scripts += [
'clients/gpsdebuginfo',
]
if env["timeservice"] or env["gpsdclients"]:
bin_binaries += [ntpshmmon]
if tiocmiwait:
bin_binaries += [ppscheck]
if env["ncurses"]:
bin_binaries += [cgps, gpsmon]
else:
announce("WARNING: ncurses not found, not building cgps or gpsmon.",
end=True)
# Test programs - always link locally and statically
test_bits = env.Program('tests/test_bits',
[libgps_static, 'tests/test_bits.c'],
LIBS=[libgps_static])
test_float = env.Program('tests/test_float', ['tests/test_float.c'])
test_geoid = env.Program('tests/test_geoid',
[libgpsd_static, libgps_static, 'tests/test_geoid.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags)
test_gpsdclient = env.Program('tests/test_gpsdclient',
[libgps_static, 'tests/test_gpsdclient.c'],
LIBS=[libgps_static, 'm'])
test_matrix = env.Program('tests/test_matrix',
[libgpsd_static, libgps_static, 'tests/test_matrix.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags)
test_mktime = env.Program('tests/test_mktime',
[libgps_static, 'tests/test_mktime.c'],
LIBS=[libgps_static], parse_flags=mathlibs + rtlibs)
test_packet = env.Program('tests/test_packet',
                          [libgpsd_static, libgps_static, 'tests/test_packet.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags)
test_timespec = env.Program('tests/test_timespec', ['tests/test_timespec.c'],
LIBS=[libgpsd_static, libgps_static],
parse_flags=gpsdflags)
test_trig = env.Program('tests/test_trig', ['tests/test_trig.c'],
parse_flags=mathlibs)
# test_libgps for glibc older than 2.17
test_libgps = env.Program('tests/test_libgps',
[libgps_static, 'tests/test_libgps.c'],
LIBS=[libgps_static],
parse_flags=mathlibs + rtlibs + dbusflags)
if env['socket_export']:
test_json = env.Program(
'tests/test_json',
[libgps_static, 'tests/test_json.c'],
LIBS=[libgps_static],
parse_flags=mathlibs + rtlibs + usbflags + dbusflags)
else:
announce("test_json not building because socket_export is disabled")
test_json = None
# duplicate below?
test_gpsmm = env.Program('tests/test_gpsmm',
[libgps_static, 'tests/test_gpsmm.cpp'],
LIBS=[libgps_static],
parse_flags=mathlibs + rtlibs + dbusflags)
testprogs = [test_bits,
test_float,
test_geoid,
test_gpsdclient,
test_libgps,
test_matrix,
test_mktime,
test_packet,
test_timespec,
test_trig]
if env['socket_export'] or cleaning:
testprogs.append(test_json)
if env["libgpsmm"] or cleaning:
testprogs.append(test_gpsmm)
# Python programs
# python misc helpers and stuff, not to be installed
python_misc = [
"libgps/jsongen.py",
"maskaudit.py",
"tests/test_clienthelpers.py",
"tests/test_misc.py",
"tests/test_xgps_deps.py",
"www/gpscap.py",
"valgrind-audit.py"
]
# Dependencies for imports in test programs
env.Depends('tests/test_clienthelpers.py',
['gps/__init__.py', 'gps/clienthelpers.py', 'gps/misc.py'])
env.Depends('tests/test_misc.py', ['gps/__init__.py', 'gps/misc.py'])
env.Depends('valgrind-audit.py', ['gps/__init__.py', 'gps/fake.py'])
# Symlink for the programs to find the 'gps' package in the build tree
env.Depends(['tests/test_clienthelpers.py', 'tests/test_misc.py'],
env.Command('tests/gps', '', PylibLink))
# Glob() has to be run after all buildable objects are defined.
# Glob(), by default, looks in the file tree and at current buildable objects.
python_modules = Glob('gps/*.py', strings=True) + ['gps/__init__.py',
'gps/gps.py',
'gps/packet.py']
# Remove the aiogps module if not configured
# Don't use Glob's exclude option, since it may not be available
if 'aiogps' in env and env['aiogps']:
python_misc.extend(["example_aiogps.py", "example_aiogps_run"])
else:
try:
python_modules.remove('gps/aiogps.py')
except ValueError:
pass
# Make PEP 241 Metadata 1.0.
# Why not PEP 314 (V1.1) or PEP 345 (V1.2)?
# V1.1 and V1.2 require a Download-URL to an installable binary
python_egg_info_source = """Metadata-Version: 1.0
Name: gps
Version: %s
Summary: Python libraries for the gpsd service daemon
Home-page: %s
Author: the GPSD project
Author-email: %s
License: BSD
Keywords: GPS
Description: The gpsd service daemon can monitor one or more GPS devices \
connected to a host computer, making all data on the location and movements \
of the sensors available to be queried on TCP port 2947.
Platform: UNKNOWN
""" % (gpsd_version, website, devmail)
python_egg_info = env.Textfile(target="gps-%s.egg-info" % (gpsd_version, ),
source=python_egg_info_source)
python_targets = ([python_egg_info] + python_progs + python_modules)
env.Command(target="include/packet_names.h", source="include/packet_states.h",
action="""
rm -f $TARGET &&\
sed -e '/^ *\\([A-Z][A-Z0-9_]*\\),/s// \"\\1\",/' <$SOURCE >$TARGET &&\
chmod a-w $TARGET""")
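# The sed rule above turns each enum line of packet_states.h, e.g.
# "   GROUND_STATE," into the quoted form "   \"GROUND_STATE\",", so the
# state names become string literals in packet_names.h.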
env.Textfile(target="include/gpsd_config.h", source=confdefs)
env.Command(target="libgps/gps_maskdump.c",
source=["maskaudit.py", "include/gps.h", "include/gpsd.h"],
action='''
rm -f $TARGET &&\
$SC_PYTHON $SOURCE -c $SRCDIR > $TARGET &&\
chmod a-w $TARGET''')
env.Command(target="libgps/ais_json.i", source="libgps/jsongen.py",
action='''\
rm -f $TARGET &&\
$SC_PYTHON $SOURCE --ais --target=parser >$TARGET &&\
chmod a-w $TARGET''')
if env['systemd']:
udevcommand = 'TAG+="systemd", ENV{SYSTEMD_WANTS}="gpsdctl@%k.service"'
else:
udevcommand = 'RUN+="%s/gpsd.hotplug"' % (env['udevdir'], )
# FIXME: why do this every time scons is called?
# $variantdir may not exist when this is run.
pythonize_header_match = re.compile(r'\s*#define\s+(\w+)\s+(\w+)\s*.*$[^\\]')
pythonized_header = ''
with open(env['SRCDIR'] + '/../include/gpsd.h') as sfp:
for content in sfp:
_match3 = pythonize_header_match.match(content)
if _match3:
if 'LOG' in content or 'PACKET' in content:
pythonized_header += ('%s = %s\n' %
(_match3.group(1), _match3.group(2)))
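# e.g. a line such as "#define LOG_SHOUT 0" in gpsd.h would come out as
# "LOG_SHOUT = 0"; only #defines whose line mentions LOG or PACKET are kept.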
if ((env['python'] and
not cleaning and
not helping and
def_target_python != env['target_python'])):
# non-default target python.
if def_python_shebang == env['python_shebang']:
# default python shebang, update to match target python
if os.sep == env['target_python'][0]:
# full path, no need for env
env['python_shebang'] = env['target_python']
else:
# partial path, need env
env['python_shebang'] = "/usr/bin/env %s" % env['target_python']
announce("Setting python_shebang to %s" % env['python_shebang'])
# Tuples for Substfile(), used to convert .in templates into generated files.
substmap = (
('@ANNOUNCE@', annmail),
('@BUGTRACKER@', bugtracker),
('@CGIUPLOAD@', cgiupload),
('@CLONEREPO@', clonerepo),
('@DEVMAIL@', devmail),
('@DOWNLOAD@', download),
('@FORMSERVER@', formserver),
('@GENERATED@', "This code is generated by scons. Do not hand-hack it!"),
('@GITREPO@', gitrepo),
('@GPSAPIVERMAJ@', api_version_major),
('@GPSAPIVERMIN@', api_version_minor),
('@GPSPACKET@', packet_ffi_shared[0].get_path()),
('@ICONPATH@', installdir('icondir', add_destdir=False)),
('@INCLUDEDIR@', installdir('includedir', add_destdir=False)),
('@IRCCHAN@', ircchan),
('@ISSUES@', bugtracker),
('@LIBDIR@', installdir('libdir', add_destdir=False)),
('@LIBGPSVERSION@', libgps_version),
('@MAILMAN@', mailman),
('@MAINPAGE@', mainpage),
('@MASTER@', 'DO NOT HAND_HACK! THIS FILE IS GENERATED'),
    ('@MIBPATH@', installdir('mibdir', add_destdir=False)),
('@PREFIX@', env['prefix']),
('@PROJECTPAGE@', projectpage),
# PEP 394 and 397 python shebang
('@PYSHEBANG@', env['python_shebang']),
('@PYPACKETH@', pythonized_header),
('@QTVERSIONED@', env['qt_versioned']),
('@RUNDIR@', env['rundir']),
('@SBINDIR@', installdir('sbindir', add_destdir=False)),
('@SCPUPLOAD@', scpupload),
('@SHAREPATH@', installdir('sharedir', add_destdir=False)),
('@SITENAME@', sitename),
('@SITESEARCH@', sitesearch),
('@SUPPORT@', 'https://gpsd.io/SUPPORT.html'),
('@TIPLINK@', tiplink),
('@TIPWIDGET@', tipwidget),
('@UDEVCOMMAND@', udevcommand),
('@USERMAIL@', usermail),
('@VERSION@', gpsd_version),
('@WEBSITE@', website),
)
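# Substfile() replaces each @KEY@ token above with its value, e.g. a
# template line "Version: @VERSION@" in libgps.pc.in is emitted as
# "Version: <gpsd_version>" in the generated libgps.pc.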
# Keep time-dependent version separate
# FIXME: Come up with a better approach with reproducible builds
substmap_dated = substmap + (('@DATE@', time.asctime()),)
# explicit templated files
templated = {
"android/gpsd_config": "android/gpsd_config.in",
"clients/gegps": "clients/gegps.py.in",
"clients/gpscat": "clients/gpscat.py.in",
"clients/gpscsv": "clients/gpscsv.py.in",
"clients/gpsd.php": "clients/gpsd.php.in",
"clients/gpsplot": "clients/gpsplot.py.in",
"clients/gpsprof": "clients/gpsprof.py.in",
"clients/gpssubframe": "clients/gpssubframe.py.in",
"clients/ubxtool": "clients/ubxtool.py.in",
"clients/xgps": "clients/xgps.py.in",
"clients/xgpsspeed": "clients/xgpsspeed.py.in",
"clients/zerk": "clients/zerk.py.in",
"contrib/ntpshmviz": "contrib/ntpshmviz.py.in",
"contrib/skyview2svg.py": "contrib/skyview2svg.py.in",
"contrib/webgps": "contrib/webgps.py.in",
"control": "control.in",
"gpsd.rules": "gpsd.rules.in",
"gpsfake": "gpsfake.py.in",
"gps/gps.py": "gps/gps.py.in",
"gps/__init__.py": "gps/__init__.py.in",
"gps/packet.py": "gps/packet.py.in",
"libgps.pc": "libgps.pc.in",
"libQgpsmm.prl": "libQgpsmm.prl.in",
"packaging/deb/etc_default_gpsd": "packaging/deb/etc_default_gpsd.in",
"packaging/deb/etc_init.d_gpsd": "packaging/deb/etc_init.d_gpsd.in",
"packaging/gpsd-setup.py": "packaging/gpsd-setup.py.in",
"packaging/rpm/gpsd.init": "packaging/rpm/gpsd.init.in",
"packaging/rpm/gpsd.spec": "packaging/rpm/gpsd.spec.in",
"packaging/X11/xgps.desktop": "packaging/X11/xgps.desktop.in",
"packaging/X11/xgpsspeed.desktop": "packaging/X11/xgpsspeed.desktop.in",
"Qgpsmm.pc": "Qgpsmm.pc.in",
"systemd/gpsdctl@.service": "systemd/gpsdctl@.service.in",
"systemd/gpsd.service": "systemd/gpsd.service.in",
"systemd/gpsd.socket": "systemd/gpsd.socket.in",
"www/faq.html": "www/faq.html.in",
"www/gps_report.cgi": "www/gps_report.cgi.in",
"www/gpscap.py": "www/gpscap.py.in",
"www/hacking.html": "www/hacking.html.in",
"www/hardware-head.html": "www/hardware-head.html.in",
"www/index.html": "www/index.html.in",
"www/troubleshooting.html": "www/troubleshooting.html.in",
}
for (tgt, src) in templated.items():
iswww = tgt.startswith('www/')
# Only www pages need @DATE@ expansion, which forces rebuild every time
subst = substmap_dated if iswww else substmap
# use scons built-in Substfile()
builder = env.Substfile(target=tgt, source=src, SUBST_DICT=subst)
# default to building all built targets, except www
# FIXME: Render this unnecessary
if not iswww:
env.Default(builder)
# set read-only to alert people trying to edit the files.
env.AddPostAction(builder, 'chmod -w $TARGET')
if ((src.endswith(".py.in") or
tgt in python_progs or
tgt in ['contrib/ntpshmviz', 'contrib/webgps'])):
# set python files to executable
env.AddPostAction(builder, 'chmod +x $TARGET')
# When the URL declarations change, so must the generated web pages
for fn in glob.glob("www/*.in"):
env.Depends(fn[:-3], ["SConstruct", "SConscript"])
# asciidoc documents
asciidocs = []
man_env = env.Clone()
if man_env.GetOption('silent'):
man_env['SPAWN'] = filtered_spawn # Suppress stderr chatter
manpage_targets = []
maninstall = []
if adoc_prog:
adoc_args_m = ('-v -a gpsdweb=%s -a gpsdver=%s' % (website, gpsd_version))
adoc_args = (adoc_args_m + ' -a docinfo=shared')
for (man, src) in all_manpages.items():
# build it
# make nroff man page
asciidocs.append(man)
env.Command(man, src,
'%s -b manpage %s -o $TARGET $SOURCE' %
(adoc_prog, adoc_args_m))
# install nroff man page
section = man.split(".")[1]
dest = os.path.join(installdir('mandir'), "man" + section)
maninstall.append(env.Install(target=dest, source=man))
# make html man page
target = 'www/%s.html' % os.path.basename(man[:-2])
env.Depends(src, ['www/docinfo.html', 'www/inc-menu.adoc'])
tgt = env.Command(target, src,
'%s -b html5 %s -a docinfodir=../www/ -o $TARGET $SOURCE' %
(adoc_prog, adoc_args))
asciidocs.append(tgt)
else:
# can't build man pages, maybe we have pre-built ones?
for man in Glob('man/*.?', strings=True):
section = man.split(".")[1]
dest = os.path.join(installdir('mandir'), "man" + section)
maninstall.append(env.Install(target=dest, source=man))
# The hardware page
env.Command('www/hardware.html',
['www/gpscap.py',
'www/hardware-head.html',
'www/gpscap.ini',
'www/hardware-tail.html'],
['cd %s/www; (cat hardware-head.html && PYTHONIOENCODING=utf-8 '
'$SC_PYTHON gpscap.py && cat hardware-tail.html) '
'> hardware.html' % variantdir])
# doc to install in 'docdir'
docinstall = env.Install(target=installdir('docdir'), source=doc_files)
if adoc_prog:
adocfiles = (('build', 'www/building'),
('INSTALL', 'www/installation'),
('README', 'www/README'),
('SUPPORT', 'www/SUPPORT'),
('www/AIVDM', 'www/AIVDM'),
('www/client-howto', 'www/client-howto'),
('www/gpsd-numbers-matter',
'www/gpsd-numbers-matter'),
('www/gpsd-client-example-code',
'www/gpsd-client-example-code'),
('www/gpsd-time-service-howto',
'www/gpsd-time-service-howto'),
('www/internals', 'www/internals'),
('www/NMEA', 'www/NMEA'),
('www/ppp-howto', 'www/ppp-howto'),
('www/protocol-evolution', 'www/protocol-evolution'),
('www/protocol-transition', 'www/protocol-transition'),
('www/replacing-nmea', 'www/replacing-nmea'),
('www/time-service-intro', 'www/time-service-intro'),
('www/ubxtool-examples', 'www/ubxtool-examples'),
('www/writing-a-driver', 'www/writing-a-driver'),
('www/performance/performance',
'www/performance/performance'),
)
for src, tgt in adocfiles:
target = '%s.html' % tgt
env.Depends(src, ['www/docinfo.html',
'www/example1.c.txt',
'www/example2.py.txt',
'www/inc-menu.adoc'])
tgt = env.Command(target, '%s.adoc' % src,
'%s -b html5 %s -o $TARGET $SOURCE' %
(adoc_prog, adoc_args))
asciidocs.append(tgt)
# Non-asciidoc, plain html webpages only
# example1.c has a .txt extension to avoid an scons bug where the
# install rule for gps.h is invoked during the build step when there
# is a CPPFLAGS matching gpsd's install prefix.
htmlpages = [
'www/bt.html',
'www/bu_303b.html',
'www/example1.c.txt',
'www/example2.py.txt',
'www/excellence.html',
'www/for-vendors.html',
'www/future.html',
'www/gps-hacking.html',
'www/gypsy.html',
'www/hall-of-shame.html',
'www/hardware.html', # built above
'www/history.html',
'www/references.html',
'www/reliability.html',
'www/upstream-bugs.html',
'www/wishlist.html',
'www/xgps-sample.html',
]
wwwpage_targets = []
# webpages from .in files
webpages_in = list(map(lambda f: f[3:-3], glob.glob("../www/*.in")))
# NB: must be a tuple; without the trailing comma this would be a plain
# string and the loop below would iterate over its characters.
webpages_in_not = ('www/hardware-tail.html',)
for fn in webpages_in_not:
if fn in webpages_in:
webpages_in.remove(fn)
# webpages extras: images, css, js
webpages_x_list = ('../www/*.css',
'../www/*.gif',
'../www/*.ico',
'../www/*.js',
'../www/*.png',
'../www/*.svg',
'../www/performance/*css',
'../www/performance/*png',
'../www/performance/*txt',
)
webpages_x = []
for glb in webpages_x_list:
webpages_x += list(map(lambda f: f[3:], glob.glob(glb)))
webpages_static = [('www/NEWS', 'NEWS'),
('www/TODO', 'TODO'),
('www/gpsdebuginfo', 'clients/gpsdebuginfo'),
]
for page in webpages_static:
targ = env.Command(page[0], page[1], 'cp $SOURCE $TARGET')
webpages_x += targ
webpages = htmlpages + asciidocs + wwwpage_targets + webpages_in + webpages_x
www = env.Alias('www', webpages)
# On the Mac (at least), some X11 programs launch the X11 server even when
# they're not actually using the display. Clearing DISPLAY in the
# environment avoids this. We leave the main environment untouched just in
# case it might be needed.
nox11_env = env['ENV'].copy()
nox11_env['DISPLAY'] = ''
# The diagram editor dia is required in order to edit the diagram masters
if have_dia:
env.Command("www/cycle.svg", ["www/cycle.dia"],
["cd %s; dia -e www/cycle.svg www/cycle.dia" % variantdir],
ENV=nox11_env)
packing = [
'packaging/deb/etc_default_gpsd',
'packaging/deb/etc_init.d_gpsd',
'packaging/gpsd-setup.py',
'packaging/README.PACKAGERS',
'packaging/rpm/gpsd.init',
'packaging/rpm/gpsd.spec',
'packaging/rpm/gpsd.sysconfig',
'packaging/X11/xgps.desktop',
'packaging/X11/xgpsspeed.desktop',
]
# Where it all comes together
build_src = [
bin_binaries,
bin_scripts,
"clients/gpsd.php",
"gpsd.rules",
icon_files,
"libgps.pc",
libraries,
manpage_targets,
mib_files,
packing,
sbin_binaries,
webpages,
]
if env['python']:
build_src.append(python_targets)
build = env.Alias('build', build_src)
# For debug, to dump the build environment
# print(env.Dump())
if [] == COMMAND_LINE_TARGETS:
# 'build' is default target
Default('build')
if qt_env:
# duplicate above?
test_qgpsmm = env.Program('tests/test_qgpsmm', ['tests/test_gpsmm.cpp'],
LIBPATH=['.'],
OBJPREFIX='qt-',
LIBS=['Qgpsmm'])
build_qt = qt_env.Alias('build', [compiled_qgpsmmlib, test_qgpsmm])
qt_env.Default(*build_qt)
testprogs.append(test_qgpsmm)
# Installation and deinstallation
# Not here because too distro-specific: udev rules, desktop files, init scripts
# It's deliberate that we don't install gpsd.h. It's full of internals that
# third-party client programs should not see.
headerinstall = [env.Install(installdir('includedir'), x)
for x in ("include/libgpsmm.h", "include/gps.h")]
binaryinstall = []
binaryinstall.append(env.Install(installdir('sbindir'), sbin_binaries))
binaryinstall.append(env.Install(installdir('bindir'), bin_binaries))
binaryinstall.append(GPSLibraryInstall(env, installdir('libdir'),
libgps_shared,
libgps_version))
# FFI library is always shared
binaryinstall.append(env.InstallVersionedLib(installdir('libdir'),
packet_ffi_shared,
SHLIBVERSION=libgps_version))
if qt_env:
binaryinstall.append(GPSLibraryInstall(qt_env, installdir('libdir'),
compiled_qgpsmmlib, libgps_version))
if ((not env['debug'] and
not env['debug_opt'] and
not env['profiling'] and
not env['nostrip'] and
not env['target_platform'].startswith('darwin'))):
env.AddPostAction(binaryinstall, '$STRIP $TARGET')
binaryinstall.append(env.Install(installdir('bindir'), bin_scripts))
python_module_dir = str(python_libdir) + os.sep + 'gps'
python_modules_install = env.Install(DESTDIR + python_module_dir,
python_modules)
python_progs_install = env.Install(installdir('bindir'), python_progs)
python_egg_info_install = env.Install(DESTDIR + str(python_libdir),
python_egg_info)
python_install = [python_modules_install,
python_progs_install,
python_egg_info_install,
# We don't need the directory explicitly for the
# install, but we do need it for the uninstall
Dir(DESTDIR + python_module_dir)]
python_lint = (python_misc + python_modules + python_progs +
['SConstruct', 'SConscript'])
if env['python']:
# Check that Python modules compile properly
# FIXME: why not install some of the .pyc?
check_compile = []
for p in python_lint:
# split in two lines for readability
check_compile.append(
'cp %s/%s tmp.py; %s -tt -m py_compile tmp.py;' %
(variantdir, p, target_python_path))
# tmp.py may have inherited non-writable permissions
check_compile.append('rm -f tmp.py*')
python_compilation_regress = Utility('python-compilation-regress',
python_lint, check_compile)
# get version from each python prog
# this ensures they can run and gps_versions match
vchk = ''
pp = []
for p in python_progs:
if not env['xgps_deps']:
if p in ['clients/xgps', 'clients/xgpsspeed']:
# do not have xgps* dependencies, don't test
# FIXME: make these do -V w/o dependencies.
continue
# need to run in variantdir to find libgpsdpacket
tgt = Utility(
'version-%s' % p, p,
'cd %s; $PYTHON %s -V' % (variantdir, p),
ENV=nox11_env)
pp.append(tgt)
python_versions = env.Alias('python-versions', pp)
else:
python_install = []
pc_install = [env.Install(installdir('pkgconfig'), 'libgps.pc')]
if qt_env:
pc_install.append(qt_env.Install(installdir('pkgconfig'), 'Qgpsmm.pc'))
pc_install.append(qt_env.Install(installdir('libdir'), 'libQgpsmm.prl'))
# icons to install
docinstall += env.Install(target=installdir('icondir'), source=icon_files)
# MIB to install
mibinstall = env.Install(target=installdir('mibdir'), source=mib_files)
# and now we know everything to install
install_src = (binaryinstall +
docinstall +
headerinstall +
maninstall +
mibinstall +
pc_install +
python_install)
install = env.Alias('install', install_src)
def Uninstall(nodes):
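    """Recursively expand install nodes into their sources and return a
    Delete() action for every real target, so the 'uninstall' command
    removes everything that 'install' would create."""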
deletes = []
for node in nodes:
if node.__class__ == install[0].__class__:
deletes.append(Uninstall(node.sources))
else:
deletes.append(Delete(str(node)))
return deletes
uninstall = env.Command('uninstall', '',
Flatten(Uninstall(Alias("install"))) or "")
env.AlwaysBuild(uninstall)
env.Precious(uninstall)
env.Alias('uninstall', uninstall)
# Target selection for '.' is badly broken. This is a general scons problem,
# not a glitch in this particular recipe. Avoid triggering the bug.
def error_action(target, source, env):
raise SCons.Error.UserError("Target selection for '.' is broken.")
AlwaysBuild(Alias(".", [], error_action))
#
# start audit checks section
#
# Putting in all these -U flags speeds up cppcheck and allows it to look
# at configurations we actually care about.
# https://github.com/danmar/cppcheck
cppcheck = Utility("cppcheck",
['build', "include/gpsd.h", "include/packet_names.h"],
"cppcheck -U__UNUSED__ -UUSE_QT -U__COVERITY__ "
"-U__future__ "
"-ULIMITED_MAX_CLIENTS -ULIMITED_MAX_DEVICES -UAF_UNSPEC "
"-UINADDR_ANY -U_WIN32 -U__CYGWIN__ "
"-UPATH_MAX -UHAVE_STRLCAT -UHAVE_STRLCPY -UHAVE_STRNLEN "
"-UIPTOS_LOWDELAY -UIPV6_TCLASS -UTCP_NODELAY -UTIOCMIWAIT "
"--template gcc "
"--enable=all --inline-suppr "
"--suppress='*:drivers/driver_proto.c' "
"--force $SRCDIR")
# Conflicts with pycodestyle:
# E121 continuation line under-indented for hanging indent
# E123 closing bracket does not match indentation of opening bracket's line
# Conflicts with gpsd style
# W504 line break after binary operator
# --exit-zero always return success, so later audits will run
flake8 = Utility("flake8", python_lint,
['flake8 --ignore=E121,E122,E123,E241,E401,E501,W504,W602 '
'--exit-zero $SOURCES'])
# Additional Python readability style checks
# Oddly these only happen when called this way?
# E121 continuation line under-indented for hanging indent
# E123 closing bracket does not match indentation of opening bracket's line
# Conflicts with gpsd style
# W504 line break after binary operator
# exit 0 so the rest of the audit runs
pycodestyle = Utility("pep8", python_lint,
['pycodestyle --ignore=E121,E122,E123,E241,W504,W602 '
'$SOURCES; exit 0'])
# pep8 was renamed to pycodestyle, same thing
env.Alias('pycodestyle', pycodestyle)
# Sanity-check Python code.
# Bletch. We don't really want to suppress W0231 E0602 E0611 E1123,
# but Python 3 syntax confuses a pylint running under Python 2.
# There's an internal error in astroid that requires we disable some
# auditing. This is irritating as hell but there's no help for it short
# of an upstream fix.
# --exit-zero always return success, so later audits will run
pylint = Utility("pylint", python_lint, [
'pylint --rcfile=/dev/null --dummy-variables-rgx=\'^_\' '
'--exit-zero --msg-template='
'"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" '
'--reports=n --disable=F0001,C0103,C0111,C1001,C0301,C0122,C0302,'
'C0322,C0324,C0323,C0321,C0330,C0411,C0413,E1136,R0201,R0204,'
'R0801,'
'R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,W0110,W0201,'
'W0121,W0123,W0231,W0232,W0234,W0401,W0403,W0141,W0142,W0603,'
'W0614,W0640,W0621,W1504,E0602,E0611,E1101,E1102,E1103,E1123,'
'F0401,I0011 $SOURCES'])
# Try to make "scan-build" call the same scons
# executable that is currently executing this SConstruct.
# Check with scan-build, an analyzer, part of clang
scan_build = Utility("scan-build",
["include/gpsd.h", "include/packet_names.h"],
"scan-build " + scons_executable_name)
env.Alias('scan_build', scan_build) # For '_' vs. '-'
# Run a valgrind audit on the daemon - not in normal tests
valgrind = Utility('valgrind', [
'valgrind-audit.py', gpsd],
'$PYTHON $SRCDIR/valgrind-audit.py'
)
# Perform all (possible) local code-sanity checks (but not the Coverity scan).
audits = []
if have_cppcheck:
audits.append(cppcheck)
if have_flake8:
audits.append(flake8)
if have_pycodestyle:
audits.append(pycodestyle)
if have_pylint:
audits.append(pylint)
if have_scan_build:
audits.append(scan_build)
if have_valgrind:
audits.append(valgrind)
env.Alias('audit', audits)
#
# end audit checks section
#
# Regression tests begin here
#
# Note that the *-makeregress targets re-create the *.log.chk source
# files from the *.log source files.
# Unit-test the bitfield extractor
bits_regress = Utility('bits-regress', [test_bits], [
'$SRCDIR/tests/test_bits --quiet'
])
# Unit-test the deg_to_str() converter
deg_regress = Utility('deg-regress', [test_gpsdclient], [
'$SRCDIR/tests/test_gpsdclient'
])
# Unit-test the matrix algebra
matrix_regress = Utility('matrix-regress', [test_matrix], [
'$SRCDIR/tests/test_matrix --quiet'
])
# MIB test
if have_smilint:
mib_regress = Utility('mib-regress', [mib_lint], [
'smilint -l 6 man/GPSD-MIB'
])
else:
mib_regress = []
# Regression-test NMEA 2000
if ((env["nmea2000"] and
have_canplayer)):
# the log files must be dependencies so they get copied into variant_dir
nmea2000_logs = Glob("test/nmea2000/*.log", strings=True)
nmea2000_tests = []
for nmea2000_log in nmea2000_logs:
# oddly this runs in build root, but needs to run in variant_dir
tgt = Utility(
'nmea2000-regress-' + nmea2000_log[:-4],
['tests/test_nmea2000', nmea2000_log, nmea2000_log + '.chk'],
' cd %s; $SRCDIR/tests/test_nmea2000 %s' %
(variantdir, nmea2000_log))
nmea2000_tests.append(tgt)
nmea2000_regress = env.Alias('nmea2000-regress', nmea2000_tests)
else:
nmea2000_regress = None
if not cleaning and not helping:
announce("NMEA2000 regression tests suppressed because rtcm104v2 is off "
"or canplayer is missing.")
# Using regress-driver requires socket_export to be enabled, plus Python
if env['socket_export'] and env['python']:
# Regression-test the daemon.
# But first dump the platform and its delay parameters.
gps_herald = Utility(
'gps-herald', [gpsd, gpsctl, '$SRCDIR/gpsfake'],
'cd %s; $PYTHON $PYTHON_COVERAGE $SRCDIR/gpsfake -T' % variantdir)
gps_log_pattern = "test/daemon/*.log"
gps_logs = Glob(gps_log_pattern, strings=True)
gps_tests = []
for gps_log in gps_logs:
# oddly this runs in build root, but needs to run in variant_dir
tgt = Utility(
'gps-regress-' + gps_log[:-4],
[gps_herald, gps_log],
'cd %s; ./regress-driver -q -o -t $REGRESSOPTS %s' %
(variantdir, gps_log))
gps_tests.append(tgt)
gps_regress = env.Alias('gps-regress', gps_tests)
# the log files must be dependencies so they get copied into variant_dir
gpsfake_logs = Glob('test/daemon/*')
# Run the passthrough log in all transport modes for better coverage
gpsfake_tests = []
for name, opts in [['pty', ''], ['udp', '-u'], ['tcp', '-o -t']]:
# oddly this runs in build root, but needs to run in variant_dir
tgt = Utility(
'gpsfake-' + name,
[gps_herald, gpsfake_logs],
'cd %s; ./regress-driver $REGRESSOPTS -q %s %s' %
(variantdir, opts, 'test/daemon/passthrough.log'))
gpsfake_tests.append(tgt)
env.Alias('gpsfake-tests', gpsfake_tests)
# Build the regression tests for the daemon.
# Note: You'll have to do this whenever the default leap second
# changes in gpsd.h. Many drivers rely on the default until they
# get the current leap second.
gps_rebuilds = []
for gps_log in gps_logs:
# oddly this runs in build root, but needs to run in variant_dir
gps_rebuilds.append(Utility(
'gps-makeregress-' + gps_log[:-4],
[gps_herald, gps_log],
'cd %s; ./regress-driver -bq -o -t '
'$REGRESSOPTS %s ' % (variantdir, gps_log)))
if GetOption('num_jobs') <= 1:
Utility('gps-makeregress', gps_herald,
'cd %s; ./regress-driver -b $REGRESSOPTS %s' %
(variantdir, gps_log_pattern))
else:
env.Alias('gps-makeregress', gps_rebuilds)
else:
announce("GPS regression tests suppressed because socket_export "
"or python is off.")
gps_regress = None
gpsfake_tests = None
# To build an individual test for a load named foo.log, put it in
# test/daemon and do this:
# regress-driver -b test/daemon/foo.log
# Regression-test the RTCM decoder.
if env["rtcm104v2"]:
rtcm2_logs = ['test/sample.rtcm2', 'test/sample.rtcm2.chk']
# the log files must be dependencies so they get copied into variant_dir
rtcm_regress = Utility('rtcm-regress', [gpsdecode, rtcm2_logs], [
'@echo "Testing RTCM decoding..."',
'@for f in $SRCDIR/test/*.rtcm2; do '
' echo "\tTesting $${f}..."; '
' TMPFILE=`mktemp -t gpsd-test.chk-XXXXXXXXXXXXXX`; '
' $SRCDIR/clients/gpsdecode -u -j <$${f} >$${TMPFILE}; '
' diff -ub $${f}.chk $${TMPFILE} || echo "Test FAILED!"; '
' rm -f $${TMPFILE}; '
'done;',
'@echo "Testing idempotency of JSON dump/decode for RTCM2"',
'@TMPFILE=`mktemp -t gpsd-test.chk-XXXXXXXXXXXXXX`; '
'$SRCDIR/clients/gpsdecode -u -e -j <test/synthetic-rtcm2.json '
' >$${TMPFILE}; '
' grep -v "^#" test/synthetic-rtcm2.json | diff -ub - $${TMPFILE} '
' || echo "Test FAILED!"; '
' rm -f $${TMPFILE}; ',
])
else:
announce("RTCM2 regression tests suppressed because rtcm104v2 is off.")
rtcm_regress = None
# Rebuild the RTCM regression tests.
Utility('rtcm-makeregress', [gpsdecode], [
'for f in $SRCDIR/test/*.rtcm2; do '
' $SRCDIR/clients/gpsdecode -j <$${f} >$${f}.chk; '
'done'
])
# Regression-test the AIVDM decoder.
aivdm_chks = [['test/sample.aivdm', 'test/sample.aivdm.chk', '-u -c'],
['test/sample.aivdm', 'test/sample.aivdm.js.chk', '-j'],
['test/sample.aivdm', 'test/sample.aivdm.ju.chk', '-u -j'],
# Parse the unscaled json reference, dump it as unscaled json,
['test/sample.aivdm.ju.chk', 'test/sample.aivdm.ju.chk',
'-u -e -j'],
# Parse the unscaled json reference, dump it as scaled json,
['test/sample.aivdm.ju.chk', 'test/sample.aivdm.js.chk',
'-e -j'],
]
aivdm_regress = None
if env["aivdm"]:
# the log files must be dependencies so they get copied into variant_dir
aivdm_tests = []
aivdm_cnt = 0
for aivdm_log, aivdm_chk, aivdm_opt in aivdm_chks:
# oddly this runs in build root, but needs to run in variant_dir
tgt = Utility(
'aivdm-regress-%d' % aivdm_cnt,
[aivdm_log, aivdm_chk, gpsdecode],
['@echo "Testing AIVDM decoding w/ %s ..."' % aivdm_opt,
'$SRCDIR/clients/gpsdecode %s < %s | diff -ub %s -' %
(aivdm_opt, aivdm_log, aivdm_chk)])
aivdm_tests.append(tgt)
aivdm_cnt += 1
aivdm_regress = env.Alias('aivdm-regress', aivdm_tests)
else:
announce("AIVDM regression tests suppressed because aivdm is off.")
# Rebuild the AIVDM regression tests.
# Use root dir copies so the new .chk is back into root.
Utility('aivdm-makeregress', [gpsdecode], [
'for f in $SRCDIR/../test/*.aivdm; do '
' $SRCDIR/clients/gpsdecode -u -c <$${f} > $${f}.chk; '
' $SRCDIR/clients/gpsdecode -u -j <$${f} > $${f}.ju.chk; '
' $SRCDIR/clients/gpsdecode -j <$${f} > $${f}.js.chk; '
'done', ])
# Regression-test the packet getter.
packet_regress = UtilityWithHerald(
'Testing detection of invalid packets...',
'packet-regress', [test_packet],
['$SRCDIR/tests/test_packet | diff -u test/packet.test.chk -', ])
# Rebuild the packet-getter regression test
Utility('packet-makeregress', [test_packet], [
'$SRCDIR/tests/test_packet > test/packet.test.chk', ])
# Regression-test the geoid and variation tester.
geoid_regress = UtilityWithHerald(
'Testing the geoid and variation models...',
'geoid-regress', [test_geoid], ['$SRCDIR/tests/test_geoid'])
# Regression-test the calendar functions
time_regress = Utility('time-regress', [test_mktime], [
'$SRCDIR/tests/test_mktime'
])
# Regression test the unpacking code in libgps
# the log files must be dependencies so they get copied into variant_dir
clientlib_logs = ['test/clientlib/multipacket.log',
'test/clientlib/multipacket.log.chk']
unpack_regress = UtilityWithHerald(
'Testing the client-library sentence decoder...',
'unpack-regress', [test_libgps, 'regress-driver', clientlib_logs], [
'$SRCDIR/regress-driver $REGRESSOPTS -c'
' $SRCDIR/test/clientlib/*.log', ])
# Unit-test the Python client helpers and misc modules
misc_regress = Utility('misc-regress', [
'tests/test_clienthelpers.py',
'tests/test_misc.py', ],
[
'cd %s; %s tests/test_clienthelpers.py' %
(variantdir, target_python_path),
'cd %s; %s tests/test_misc.py' % (variantdir, target_python_path), ])
# Build the regression test for the sentence unpacker
Utility('unpack-makeregress', [test_libgps], [
'@echo "Rebuilding the client sentence-unpacker tests..."',
'$SRCDIR/regress-driver $REGRESSOPTS -c -b $SRCDIR/test/clientlib/*.log'
])
# Unit-test the JSON parsing
if env['socket_export']:
json_regress = Utility('json-regress', [test_json],
['$SRCDIR/tests/test_json'])
else:
json_regress = None
# Unit-test timespec math
timespec_regress = Utility('timespec-regress', [test_timespec], [
'$SRCDIR/tests/test_timespec'
])
# Unit-test float math
float_regress = Utility('float-regress', [test_float], [
'$SRCDIR/tests/test_float'
])
# Unit-test trig math
trig_regress = Utility('trig-regress', [test_trig], [
'$SRCDIR/tests/test_trig'
])
# consistency-check the driver methods
method_regress = UtilityWithHerald(
'Consistency-checking driver methods...',
'method-regress', [test_packet], [
'$SRCDIR/tests/test_packet -c >/dev/null', ])
# Test the xgps/xgpsspeed dependencies
if env['xgps_deps']:
test_xgps_deps = UtilityWithHerald(
'Testing xgps/xgpsspeed dependencies (since xgps=yes)...',
'test-xgps-deps', ['$SRCDIR/tests/test_xgps_deps.py'], [
'$PYTHON $SRCDIR/tests/test_xgps_deps.py'])
else:
test_xgps_deps = None
# Run all normal regression tests
describe = UtilityWithHerald(
'Run normal regression tests for %s...' % gpsd_revision.strip(),
'describe', [], [])
# Delete all test programs
testclean = Utility('testclean', [], 'rm -fr %s/tests' % variantdir)
test_nondaemon = [
aivdm_regress,
bits_regress,
deg_regress,
describe,
float_regress,
geoid_regress,
json_regress,
matrix_regress,
method_regress,
mib_regress,
packet_regress,
rtcm_regress,
test_xgps_deps,
time_regress,
timespec_regress,
# trig_regress, # not ready
]
if env['python']:
test_nondaemon.append(misc_regress)
test_nondaemon.append(python_compilation_regress)
test_nondaemon.append(python_versions)
test_nondaemon.append(unpack_regress)
if env['socket_export']:
test_nondaemon.append(test_json)
if env['libgpsmm']:
test_nondaemon.append(test_gpsmm)
if qt_env:
test_nondaemon.append(test_qgpsmm)
test_quick = test_nondaemon + [gpsfake_tests]
test_noclean = test_quick + [nmea2000_regress, gps_regress]
env.Alias('test-nondaemon', test_nondaemon)
env.Alias('test-quick', test_quick)
check = env.Alias('check', test_noclean)
env.Alias('testregress', check)
env.Alias('build-tests', testprogs)
build_all = env.Alias('build-all', build + testprogs)
# Remove all shared-memory segments. Normally only needs to be run
# when a segment size changes.
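# The seven 0x4e5450xx keys below spell "NTP0".."NTP6" in ASCII;
# 0x47505345 is "GPSE".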
shmclean = Utility('shmclean', [], ["ipcrm -M 0x4e545030;"
"ipcrm -M 0x4e545031;"
"ipcrm -M 0x4e545032;"
"ipcrm -M 0x4e545033;"
"ipcrm -M 0x4e545034;"
"ipcrm -M 0x4e545035;"
"ipcrm -M 0x4e545036;"
"ipcrm -M 0x47505345;"
])
# The website directory
#
# None of these productions are fired by default.
# The content they handle is the GPSD website, not included in
# release tarballs.
# Documentation
# Paste 'scons --quiet validation-list' to a batch validator such as
# http://htmlhelp.com/tools/validator/batch.html.en
def validation_list(target, source, env):
    for page in glob.glob("www/*.html"):
        if '-head' not in page:
            with open(page) as fp:
                if "Valid HTML" in fp.read():
                    print(os.path.join(website, os.path.basename(page)))
Utility("validation-list", [www], validation_list)
# Experimenting with pydoc. Not yet fired by any other productions.
# scons www/ dies with this
# # if env['python']:
# # env.Alias('pydoc', "www/pydoc/index.html")
# #
# # # We need to run epydoc with the Python version the modules built for.
# # # So we define our own epydoc instead of using /usr/bin/epydoc
# # EPYDOC = "python -c 'from epydoc.cli import cli; cli()'"
# # env.Command('www/pydoc/index.html', python_progs + glob.glob("*.py")
# # + glob.glob("gps/*.py"), [
# # 'mkdir -p www/pydoc',
# # EPYDOC + " -v --html --graph all -n GPSD $SOURCES -o www/pydoc",
# # ])
# Productions for setting up and performing udev tests.
#
# Requires root. Do "udev-install", then "tail -f /var/log/syslog" in
# another window, then run 'scons udev-test', then plug and unplug the
# GPS ad libitum. All is well when you get fix reports each time a GPS
# is plugged in.
#
# In case you are a systemd user you might also need to watch the
# journalctl output. Instead of the hotplug script the gpsdctl@.service
# unit will handle hotplugging together with the udev rules.
#
# Note that a udev event can be triggered with an invocation like:
# udevadm trigger --sysname-match=ttyUSB0 --action add
if env['systemd']:
systemdinstall_target = [env.Install(DESTDIR + env['unitdir'],
"systemd/%s" % (x,)) for x in
("gpsdctl@.service", "gpsd.service",
"gpsd.socket")]
systemd_install = env.Alias('systemd_install', systemdinstall_target)
systemd_uninstall = env.Command(
'systemd_uninstall', '',
Flatten(Uninstall(Alias("systemd_install"))) or "")
env.AlwaysBuild(systemd_uninstall)
env.Precious(systemd_uninstall)
hotplug_wrapper_install = []
else:
hotplug_wrapper_install = [
'cp $SRCDIR/../gpsd.hotplug ' + DESTDIR + env['udevdir'],
'chmod a+x ' + DESTDIR + env['udevdir'] + '/gpsd.hotplug'
]
udev_install = Utility('udev-install', 'install', [
'mkdir -p ' + DESTDIR + env['udevdir'] + '/rules.d',
'cp $SRCDIR/gpsd.rules ' + DESTDIR + env['udevdir'] +
'/rules.d/25-gpsd.rules', ] + hotplug_wrapper_install)
if env['systemd']:
env.Requires(udev_install, systemd_install)
if not env["sysroot"]:
systemctl_daemon_reload = Utility('systemctl-daemon-reload', '',
['systemctl daemon-reload || true'])
env.AlwaysBuild(systemctl_daemon_reload)
env.Precious(systemctl_daemon_reload)
env.Requires(systemctl_daemon_reload, systemd_install)
env.Requires(udev_install, systemctl_daemon_reload)
Utility('udev-uninstall', '', [
'rm -f %s/gpsd.hotplug' % env['udevdir'],
'rm -f %s/rules.d/25-gpsd.rules' % env['udevdir'],
])
Utility('udev-test', '',
['$SRCDIR/gpsd/gpsd -N -n -F /var/run/gpsd.sock -D 5', ])
# Default targets
if not cleaning:
# FIXME: redundant?
env.Default(build)
# Tags for Emacs and vi
misc_sources = ['clients/cgps.c',
'clients/gps2udp.c',
'clients/gpsdctl.c',
'clients/gpsdecode.c',
'clients/gpspipe.c',
'clients/gpxlogger.c',
'clients/ntpshmmon.c',
'clients/ppscheck.c',
'gpsctl.c',
]
sources = libgpsd_sources + libgps_sources + gpsd_sources + gpsmon_sources + \
misc_sources
env.Command('#TAGS', sources, ['etags ' + " ".join(sources)])
# Release machinery begins here
#
# We need to be in the actual project repo (i.e. not doing a -Y build)
# for these productions to work.
distfiles.sort()
# remove git and CI stuff from files to tar/zip
distfiles_ignore = [
".ci-build/build.sh",
".ci-build/test_options.sh",
".gitignore",
".gitlab-ci.yml",
".travis.yml",
# remove contrib/ais-samples
"contrib/ais-samples/ais-nmea-sample.log",
"contrib/ais-samples/ais-nmea-sample.log.chk", ]
for fn in distfiles_ignore:
if fn in distfiles:
distfiles.remove(fn)
# tar balls do not need all generated files
# tar balls do need packaging
for f in packing:
if f not in distfiles:
# should not be in git, generated file, we need it
distfiles.append(f)
# zip archive
target = '#gpsd-${VERSION}.zip'
dozip = env.Zip(target, distfiles)
ziptgt = env.Alias('zip', dozip)
if have_tar:
target = '#gpsd-%s.tar' % gpsd_version
# .tar.gz archive
gzenv = Environment(TARFLAGS='-c -z')
targz = gzenv.Tar(target + '.gz', distfiles)
# .tar.xz archive
xzenv = Environment(TARFLAGS='-c -J')
tarxz = xzenv.Tar(target + '.xz', distfiles)
env.Alias('tar', [targz, tarxz])
env.Alias('dist', [ziptgt, targz, tarxz])
Clean('build', [targz, tarxz, dozip])
# Make sure build-from-tarball works.
# Use possibly nonstandard name for scons
scons_cmd = [scons_executable_name]
# Inherit selected options from this scons run
if GetOption('silent'):
scons_cmd.append('-s')
if GetOption('no_progress'): # Undocumented name
scons_cmd.append('-Q')
njobs = GetOption('num_jobs')
if njobs != 1:
scons_cmd.append('-j%d' % njobs)
testbuild = Utility('testbuild', [targz], [
'rm -Rf testbuild',
'mkdir testbuild',
'cd testbuild;'
'pwd;'
'${TAR} -xzvf ../gpsd-${VERSION}.tar.gz;'
'cd gpsd-${VERSION}; %s;' % ' '.join(scons_cmd),
])
releasecheck = env.Alias('releasecheck', [
testbuild,
check,
audits,
])
# The chmod copes with the fact that scp will give a
# replacement the permissions of the *original*...
upload_release = Utility('upload-release', ['dist'], [
'rm -f gpsd-*.sig',
'gpg -b gpsd-${VERSION}.tar.gz',
'gpg -b gpsd-${VERSION}.tar.xz',
'gpg -b gpsd-${VERSION}.zip',
'chmod ug=rw,o=r gpsd-${VERSION}.tar.* gpsd-${VERSION}.zip',
'scp gpsd-${VERSION}.tar.* gpsd-${VERSION}.zip* ' + scpupload,
])
env.Alias('upload_release', upload_release) # For '_' vs. '-'
# How to tag a release
tag_release = Utility('tag-release', [], [
'git tag -s -m "Tagged for external release ${VERSION}" \
release-${VERSION}'])
env.Alias('tag_release', tag_release) # For '_' vs. '-'
upload_tags = Utility('upload-tags', [], ['git push --tags'])
env.Alias('upload_tags', upload_tags) # For '_' vs. '-'
# Local release preparation. This production will require Internet access,
# but it doesn't do any uploads or public repo mods.
#
# Note that tag_release has to fire early, otherwise the value of REVISION
# won't be right when gpsd_config.h is generated for the tarball.
# FIXME: this is confused
releaseprep = env.Alias("releaseprep",
[Utility("distclean", [],
["rm -f include/gpsd_config.h"]),
tag_release,
'dist'])
# How to update the website. Assumes a local GitLab pages setup.
# See "pages:" in .gitlab-ci.yml
www_dest = os.environ.get('WEBSITE', '.public')
website = Utility("website", www,
'rsync --exclude="*.in" -avz buildtmp/www/ %s ' % www_dest)
# All a buildup to this.
env.Alias("release", [releaseprep,
upload_release,
upload_tags,
website])
# Undo local release preparation
undoprep = Utility("undoprep", [],
['rm -f gpsd-${VERSION}.tar.?z',
                    'rm -f gpsd-${VERSION}.zip',
'git tag -d release-${VERSION};'])
#######
# start Debian stuff
#######
# Make RPM from the specfile in packaging
# untested
dist_rpm = Utility('dist-rpm', 'dist', 'rpmbuild -ta gpsd-${VERSION}.tar.gz')
env.Pseudo(dist_rpm) # mark as fake target.
env.Alias('distrpm', dist_rpm) # For '_' vs. '-'
# Experimental release mechanics using shipper
# This will ship a freecode metadata update
# untested
ship = Utility("ship", ['dist', "control"],
['cd %s; shipper version=%s | sh -e -x' %
(variantdir, gpsd_version)])
#######
# end Debian stuff
#######
# Release machinery ends here
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
# vim: set expandtab shiftwidth=4
| ntpsec/gpsd | SConscript | SConscript | 128,460 | python | en | code | 30 | github-code | 50 | |
11335889947 | import re
from collections import defaultdict, Counter
def load(path: str, encoding: str = "utf-8") -> str:
with open(path, encoding=encoding, errors="ignore") as f:
data = f.read()
return data
def preprocess(text: str) -> str:
    # TODO: add entity recognition
return text
def tokenize(text: str) -> list[str]:
return re.findall(r"\w{2,}", text)
def count_words(words: list[str]) -> dict[str, int]:
counter = {}
for word in words:
if word not in counter:
counter[word] = 1
else:
counter[word] += 1
return counter
def count_words__default(words: list[str]) -> dict[str, int]:
counter = defaultdict(int)
for word in words:
counter[word] += 1
return counter
def count_words__count(words: list[str]) -> dict[str, int]:
counter = {}
for word in set(words):
counter[word] = words.count(word)
return counter
def count_words__counter(words: list[str]) -> list[tuple[str, int]]:
counter = Counter(words)
return counter.most_common(10)
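# Rough costs of the four variants above, assuming n words and m unique
# words: count_words and count_words__default are O(n);
# count_words__count is O(n * m) because list.count() rescans the whole
# list for every unique word; count_words__counter delegates to
# collections.Counter (also O(n)) and reports only the top-10 pairs.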
if __name__ == "__main__":
text = load("data/wizard_oz.txt", encoding="cp1251")
text = preprocess(text)
words = tokenize(text)
print(count_words__counter(words))
| BoeingLess/nlp-project | 08.py | 08.py | py | 1,261 | python | en | code | 0 | github-code | 50 |
7122916860 | import httplib2
import os
import json
from apiclient import discovery
from apiclient.http import MediaFileUpload
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'AcademiaDasApostas'
file_id = '1_NmlDRUS0ITWVpQ7E_ImzT01HRgX4ZHyQJ1omVA7yv4'
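# Assumption: file_id names an existing Drive spreadsheet; drive_upload()
# below overwrites it in place via files().update() rather than creating
# a new file.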
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,'academiadasapostas.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def drive_upload(http):
drive_service = discovery.build('drive', 'v3', http=http)
file_metadata = {
'name' : 'AcademiaDasApostas',
'mimeType' : 'application/vnd.google-apps.spreadsheet'
}
media = MediaFileUpload('bets.txt', mimetype='text/csv', resumable=True)
file = drive_service.files().update(fileId=file_id, body=file_metadata, media_body=media, fields='id').execute()
print(file)
def sheets_update(sheets_service, grid_id):
body = {
"requests": [
{
"autoResizeDimensions": {
"dimensions": {
"sheetId": grid_id,
"dimension": "COLUMNS"
}
}
},
{
"repeatCell": {
"range": {
"sheetId": grid_id
},
"cell": {
"userEnteredFormat": {
"horizontalAlignment" : "CENTER"
}
},
"fields": "userEnteredFormat(horizontalAlignment)"
}
}
]
}
response = sheets_service.spreadsheets().batchUpdate(spreadsheetId=file_id, body=body).execute()
print (response)
def list_cond_formats(sheets_service):
    # 'fields' must be a string field mask; return the response so callers
    # can inspect the conditional formats.
    response = sheets_service.spreadsheets().get(
        spreadsheetId=file_id,
        fields='sheets(properties(title,sheetId),conditionalFormats)').execute()
    return response
def get_grid_id(sheets_service):
response = sheets_service.spreadsheets().get(spreadsheetId=file_id, includeGridData=True).execute()
return response['sheets'][0]['properties']['sheetId']
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
sheets_discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
sheets_service = discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=sheets_discoveryUrl)
drive_upload(http)
sheets_update(sheets_service, get_grid_id(sheets_service))
if __name__ == '__main__':
main() | amsimoes/spiderBet | sheets.py | sheets.py | py | 3,826 | python | en | code | 14 | github-code | 50 |
11147848190 | from django import forms
from .models import Comment
error_msg = {
"required": 'این فیلد اجباری است'
}
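# The Persian message above reads "This field is required".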
class CommentAddForm(forms.Form):
subject = forms.CharField(
max_length=120,
widget=forms.TextInput(attrs={"class": 'input-ui pr-2', "placeholder": 'عنوان نظر خود را بنویسید'}),
error_messages=error_msg,
required=True
)
positive_point = forms.CharField(
max_length=500,
widget=forms.TextInput(
attrs={"class": 'input-ui pr-2 ui-input-field', "id": 'advantage-input', "autocomplete": 'off'}),
required=False,
)
negative_point = forms.CharField(
max_length=500,
widget=forms.TextInput(
attrs={"class": 'input-ui pr-2 ui-input-field', "id": 'advantage-input', "autocomplete": 'off'}),
required=False,
)
body = forms.CharField(
widget=forms.Textarea(attrs={"class": 'input-ui pr-2 pt-2', "rows": 5, "placeholder": 'متن خود را بنویسید'}),
error_messages=error_msg,
required=True
)
CHOICES = (
(1, 'پیشنهاد میکنم'),
(2, 'خیر،پیشنهاد نمی کنم'),
(3, 'نظری ندارم'),
)
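    # Persian labels above: 1 = "I recommend it", 2 = "No, I do not
    # recommend it", 3 = "No opinion".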
suggest = forms.ChoiceField(
choices=CHOICES,
widget=forms.RadioSelect(),
required=False
)
quality = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
cost = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
innovation = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
features = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
easiness = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
designing = forms.CharField(
widget=forms.TextInput(
attrs={"id": 'ex19', "type": 'text', "data-provide": 'slider', "data-slider-ticks": '[1, 2, 3, 4, 5]',
"data-slider-ticks-labels": '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
"data-slider-min": '1', "data-slider-max": '5', "data-slider-step": '1',
"data-slider-value": '3', "data-slider-tooltip": 'hide'})
)
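    # The six slider fields above differ only in name (and all share the
    # HTML id 'ex19', which would produce duplicate ids in the page); a
    # hedged refactoring sketch (hypothetical helper, not part of the
    # original code) could build them once:
    #
    #   def _slider_field():
    #       return forms.CharField(widget=forms.TextInput(attrs={
    #           "id": 'ex19', "type": 'text', "data-provide": 'slider',
    #           "data-slider-ticks": '[1, 2, 3, 4, 5]',
    #           "data-slider-ticks-labels":
    #               '["خیلی بد", "بد", "معمولی","خوب","عالی"]',
    #           "data-slider-min": '1', "data-slider-max": '5',
    #           "data-slider-step": '1', "data-slider-value": '3',
    #           "data-slider-tooltip": 'hide'}))
    #
    #   quality = _slider_field()  # likewise for cost, innovation,
    #                              # features, easiness and designing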
def save(self, user, product):
cd = self.cleaned_data
comment = Comment(
user=user, product=product, subject=cd['subject'], body=cd['body'], quality=cd['quality'], cost=cd['cost'],
innovation=cd['innovation'], features=cd['features'], easiness=cd['easiness'], designing=cd['designing']
)
if cd['positive_point']:
comment.positive_point = cd['positive_point']
if cd['negative_point']:
comment.negative_point = cd['negative_point']
if cd['suggest']:
comment.suggest = cd['suggest']
comment.save()
return comment
class QuestionForm(forms.Form):
name = forms.CharField(
max_length=64,
widget=forms.TextInput(attrs={"class": 'form-control col-md-3', "placeholder": 'نام شما'}),
required=True
)
body = forms.CharField(
widget=forms.Textarea(attrs={"class": 'form-control mb-3', "rows": '5', "placeholder": 'متن پرسش شما'}),
required=True
)
notify = forms.BooleanField(
widget=forms.CheckboxInput(attrs={"class": 'custom-control-input', "id": 'customCheck3'}),
required=False
)
def clean_name(self):
if len(self.cleaned_data['name']) > 64:
raise forms.ValidationError('نام نمیتواند بیش از ۶۴ کاراکتر باشد')
return self.cleaned_data['name']
| alireza-fa/didikala | comment/forms.py | forms.py | py | 5,625 | python | en | code | 0 | github-code | 50 |
27089425598 | # -*- coding: utf-8 -*-
# ---------------------
# Yolo6d network, including losses
# @Author: Fan, Mo
# ---------------------
# import sys
import numpy as np
import tensorflow as tf
import config as cfg
from utils.utils import (
softmax_cross_entropy,
conf_mean_squared_error,
coord_mean_squared_error,
confidence9,
get_max_index,
corner_confidences9,
corner_confidence9,
)
class YOLO6D_net:
def __init__(self, is_training=True):
"""
Input images: [batch, 416 * 416 * 3]
Input labels: [batch * 13 * 13 * (19 + num_classes)]
output tensor: [batch, 13 * 13 * (19 + num_classes)]
"""
self.is_training = is_training
self.Batch_Size = cfg.BATCH_SIZE
self.EPSILON = cfg.EPSILON
self.learning_rate = cfg.LEARNING_RATE
self.total_loss = None
self.disp = cfg.DISP
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.image_size = cfg.IMAGE_SIZE
self.num_class = cfg.NUM_CLASSES
self.Batch_Norm = cfg.BATCH_NORM
self.cell_size = cfg.CELL_SIZE
self.obj_scale = cfg.CONF_OBJ_SCALE
self.noobj_scale = cfg.CONF_NOOBJ_SCALE
self.class_scale = cfg.CLASS_SCALE
self.coord_scale = cfg.COORD_SCALE
self.thresh = 0.6
        self.boundry_1 = 9 * 2  ## Separate coordinates
self.boundry_2 = self.num_class
self.input_images = tf.placeholder(tf.float32, [self.Batch_Size, self.image_size, self.image_size, 3], name='images')
self.logit = self.build_networks(self.input_images) # shape: [batch, cell, cell, 20]
self.labels = tf.placeholder(tf.float32, [self.Batch_Size, self.cell_size, self.cell_size, 20], name='labels')
self.target = tf.placeholder(tf.float32, [self.Batch_Size, 21], name='target')
if self.is_training:
self.total_loss = self.Region_Loss(self.logit, self.target, self.labels)
tf.summary.tensor_summary('Total_loss', self.total_loss)
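        # Per-cell layout of the 20 output channels: 0..17 are the (x, y)
        # offsets of the 9 control points, 18 is the box confidence and
        # 19 is the (single) class score.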
# ======================== Net definition ==================================
def build_networks(self, inputs):
if self.disp:
print("\n--------------Building network---------------")
net = self.conv_layer(inputs, [3, 3, 3, 32], name = '0_conv')
net = self.pooling_layer(net, name = '1_pool')
net = self.conv_layer(net, [3, 3, 32, 64], name = '2_conv')
net = self.pooling_layer(net, name = '3_pool')
net = self.conv_layer(net, [3, 3, 64, 128], name = '4_conv')
net = self.conv_layer(net, [1, 1, 128, 64], name = '5_conv')
net = self.conv_layer(net, [3, 3, 64, 128], name = '6_conv')
net = self.pooling_layer(net, name = '7_pool')
net = self.conv_layer(net, [3, 3, 128, 256], name = '8_conv')
net = self.conv_layer(net, [1, 1, 256, 128], name = '9_conv')
net = self.conv_layer(net, [3, 3, 128, 256], name = '10_conv')
net = self.pooling_layer(net, name = '11_pool')
net = self.conv_layer(net, [3, 3, 256, 512], name = '12_conv')
net = self.conv_layer(net, [1, 1, 512, 256], name = '13_conv')
net = self.conv_layer(net, [3, 3, 256, 512], name = '14_conv')
net = self.conv_layer(net, [1, 1, 512, 256], name = '15_conv')
net16 = self.conv_layer(net, [3, 3, 256, 512], name = '16_conv')
net = self.pooling_layer(net16, name = '17_pool')
net = self.conv_layer(net, [3, 3, 512, 1024], name = '18_conv')
net = self.conv_layer(net, [1, 1, 1024, 512], name = '19_conv')
net = self.conv_layer(net, [3, 3, 512, 1024], name = '20_conv')
net = self.conv_layer(net, [1, 1, 1024, 512], name = '21_conv')
net = self.conv_layer(net, [3, 3, 512, 1024], name = '22_conv')
net = self.conv_layer(net, [3, 3, 1024, 1024], name = '23_conv')
net24 = self.conv_layer(net, [3, 3, 1024, 1024], name = '24_conv')
net = self.conv_layer(net16, [1, 1, 512, 64], name = '26_conv')
net = self.reorg(net)
net = tf.concat([net, net24], 3)
net = self.conv_layer(net, [3, 3, int(net.get_shape()[3]), 1024], name = '29_conv')
        net = self.conv_layer(net, [1, 1, 1024, 20], batch_norm=False, name = '30_conv', activation='linear') # 18 coords + 1 confidence + 1 class
if self.disp:
print("----------Building network complete----------\n")
return net
def conv_layer(self, inputs, shape, batch_norm = True, name = '0_conv', activation = 'leaky'):
initializer = tf.contrib.layers.xavier_initializer()
weight = tf.Variable(initializer(shape), name='weight')
# weight = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weight')
biases = tf.Variable(tf.constant(1.0, shape=[shape[3]]), name='biases')
conv = tf.nn.conv2d(inputs, weight, strides=[1, 1, 1, 1], padding='SAME', name=name)
if batch_norm:
depth = shape[3]
scale = tf.Variable(tf.ones([depth, ], dtype='float32'), name='scale')
shift = tf.Variable(tf.zeros([depth, ], dtype='float32'), name='shift')
mean = tf.Variable(tf.ones([depth, ], dtype='float32'), name='rolling_mean')
variance = tf.Variable(tf.ones([depth, ], dtype='float32'), name='rolling_variance')
conv = tf.nn.batch_normalization(conv, mean, variance, shift, scale, 1e-05)
conv = tf.add(conv, biases)
else:
conv = tf.add(conv, biases)
if activation == 'leaky':
conv = tf.nn.leaky_relu(conv, alpha=0.1)
elif activation == 'relu':
conv = tf.nn.relu(conv)
elif activation == 'linear':
return conv
return conv
def pooling_layer(self, inputs, name = '1_pool'):
pool = tf.nn.max_pool(inputs, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME', name = name)
return pool
def reorg(self, inputs):
"""
Reorg the tensor(half the size, 4* the depth)
"""
outputs_1 = inputs[:, ::2, ::2, :]
outputs_2 = inputs[:, ::2, 1::2, :]
outputs_3 = inputs[:, 1::2, ::2, :]
outputs_4 = inputs[:, 1::2, 1::2, :]
output = tf.concat([outputs_1, outputs_2, outputs_3, outputs_4], axis = 3)
return output
# ======================= Net definition end ===============================
def Region_Loss(self, output, target, labels, scope='Loss'):
"""
        output: output from net, shape: [batch, cell, cell, 20], type: tf.tensor (18 coords + conf + class)
        target: ground truth, shape: [batch, 21], type: tf.tensor
        labels: ground truth, shape: [batch, cell, cell, 20], type: tf.tensor
"""
shape = output.get_shape()
nB = shape[0].value
nC = 1
nH = shape[1].value
nW = shape[2].value
with tf.variable_scope(scope):
x0 = tf.reshape(tf.nn.sigmoid(output[:,:,:,0]), (nB, nH, nW))
y0 = tf.reshape(tf.nn.sigmoid(output[:,:,:,1]), (nB, nH, nW))
x1 = tf.reshape(output[:,:,:,2], (nB, nH, nW))
y1 = tf.reshape(output[:,:,:,3], (nB, nH, nW))
x2 = tf.reshape(output[:,:,:,4], (nB, nH, nW))
y2 = tf.reshape(output[:,:,:,5], (nB, nH, nW))
x3 = tf.reshape(output[:,:,:,6], (nB, nH, nW))
y3 = tf.reshape(output[:,:,:,7], (nB, nH, nW))
x4 = tf.reshape(output[:,:,:,8], (nB, nH, nW))
y4 = tf.reshape(output[:,:,:,9], (nB, nH, nW))
x5 = tf.reshape(output[:,:,:,10], (nB, nH, nW))
y5 = tf.reshape(output[:,:,:,11], (nB, nH, nW))
x6 = tf.reshape(output[:,:,:,12], (nB, nH, nW))
y6 = tf.reshape(output[:,:,:,13], (nB, nH, nW))
x7 = tf.reshape(output[:,:,:,14], (nB, nH, nW))
y7 = tf.reshape(output[:,:,:,15], (nB, nH, nW))
x8 = tf.reshape(output[:,:,:,16], (nB, nH, nW))
y8 = tf.reshape(output[:,:,:,17], (nB, nH, nW))
conf = tf.reshape(tf.nn.sigmoid(output[:,:,:,18]), (nB, nH, nW))
# cls = tf.reshape(output[:,:,:,19], (nB, nH, nW))
# Create pred boxes
            pred_corners = []  # will collect 18 tensors, each of shape [batch*169]
grid_x = np.tile(np.tile(np.linspace(0, nW-1, nW), (nH, 1)).transpose([1,0]), (nB, 1, 1)).reshape(nB*nH*nW) # [batch*169]
grid_y = np.tile(np.tile(np.linspace(0, nH-1, nH), (nW, 1)), (nB, 1, 1)).reshape(nB*nH*nW) # [batch*169]
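            # grid_x / grid_y hold every cell's column / row index flattened
            # to [batch*169]; adding them turns the net's cell-relative
            # offsets into absolute cell coordinates before the divide by
            # nW / nH normalizes to image fractions.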
pred_corners.append((tf.reshape(x0, [nB*nH*nW]) + grid_x) / nW) # divide by nW to set the number to be percentage value
pred_corners.append((tf.reshape(y0, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x1, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y1, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x2, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y2, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x3, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y3, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x4, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y4, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x5, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y5, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x6, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y6, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x7, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y7, [nB*nH*nW]) + grid_y) / nH)
pred_corners.append((tf.reshape(x8, [nB*nH*nW]) + grid_x) / nW)
pred_corners.append((tf.reshape(y8, [nB*nH*nW]) + grid_y) / nH)
pred_corners = tf.convert_to_tensor(pred_corners)
            pred_corners = tf.reshape(tf.transpose(pred_corners, (1, 0)), (-1, 18)) # (nB X nH X nW) X 18
# Build targets
nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx0, tx1, tx2, tx3, tx4, tx5, tx6, tx7, tx8, ty0, ty1, ty2, ty3, ty4, ty5, ty6, ty7, ty8, tconf = \
self.build_targets(pred_corners, target, labels, nC, nH, nW, self.noobj_scale, self.obj_scale, self.thresh)
conf_mask = tf.sqrt(conf_mask)
# Create loss
loss = []
loss_x0 = tf.losses.mean_squared_error(x0*coord_mask, tx0*coord_mask, weights=self.coord_scale)/2.0
loss_y0 = tf.losses.mean_squared_error(y0*coord_mask, ty0*coord_mask, weights=self.coord_scale)/2.0
loss_x1 = tf.losses.mean_squared_error(x1*coord_mask, tx1*coord_mask, weights=self.coord_scale)/2.0
loss_y1 = tf.losses.mean_squared_error(y1*coord_mask, ty1*coord_mask, weights=self.coord_scale)/2.0
loss_x2 = tf.losses.mean_squared_error(x2*coord_mask, tx2*coord_mask, weights=self.coord_scale)/2.0
loss_y2 = tf.losses.mean_squared_error(y2*coord_mask, ty2*coord_mask, weights=self.coord_scale)/2.0
loss_x3 = tf.losses.mean_squared_error(x3*coord_mask, tx3*coord_mask, weights=self.coord_scale)/2.0
loss_y3 = tf.losses.mean_squared_error(y3*coord_mask, ty3*coord_mask, weights=self.coord_scale)/2.0
loss_x4 = tf.losses.mean_squared_error(x4*coord_mask, tx4*coord_mask, weights=self.coord_scale)/2.0
loss_y4 = tf.losses.mean_squared_error(y4*coord_mask, ty4*coord_mask, weights=self.coord_scale)/2.0
loss_x5 = tf.losses.mean_squared_error(x5*coord_mask, tx5*coord_mask, weights=self.coord_scale)/2.0
loss_y5 = tf.losses.mean_squared_error(y5*coord_mask, ty5*coord_mask, weights=self.coord_scale)/2.0
loss_x6 = tf.losses.mean_squared_error(x6*coord_mask, tx6*coord_mask, weights=self.coord_scale)/2.0
loss_y6 = tf.losses.mean_squared_error(y6*coord_mask, ty6*coord_mask, weights=self.coord_scale)/2.0
loss_x7 = tf.losses.mean_squared_error(x7*coord_mask, tx7*coord_mask, weights=self.coord_scale)/2.0
loss_y7 = tf.losses.mean_squared_error(y7*coord_mask, ty7*coord_mask, weights=self.coord_scale)/2.0
loss_x8 = tf.losses.mean_squared_error(x8*coord_mask, tx8*coord_mask, weights=self.coord_scale)/2.0
loss_y8 = tf.losses.mean_squared_error(y8*coord_mask, ty8*coord_mask, weights=self.coord_scale)/2.0
            loss_conf = tf.losses.mean_squared_error(conf*conf_mask, tconf*conf_mask)/2.0
loss_cls = 0
loss_x = loss_x0 + loss_x1 + loss_x2 + loss_x3 + loss_x4 + loss_x5 + loss_x6 + loss_x7 + loss_x8
loss_y = loss_y0 + loss_y1 + loss_y2 + loss_y3 + loss_y4 + loss_y5 + loss_y6 + loss_y7 + loss_y8
loss_coord = loss_x + loss_y
total_loss = loss_conf + loss_coord + loss_cls
loss.append(total_loss)
loss.append(loss_conf)
loss.append(loss_coord)
loss.append(loss_cls)
loss = tf.convert_to_tensor(loss)
return loss
def build_targets(self, pred_corners, target, labels, num_classes, nH, nW, noobject_scale, object_scale, sil_thresh):
"""
        pred_corners: computed by the net with grid offsets applied, shape: [(nB X 169), 18], type: tf.tensor, values in percentage
target: read from label files, shape: [nB, 21], type: tf.tensor
labels: shape: [nB, nH, nW, 20], type: tf.tensor
num_classes: 1
nH: 13
nW: 13
noobject_scale: 0.1
object_scale: 5
sil_thresh: 0.6
"""
nB = self.Batch_Size
# nC = num_classes
conf_mask = []
coord_mask = []
cls_mask = []
tconf = []
response = labels[:,:,:,0] # [nB, nW, nH]
tx0 = labels[:,:,:,1]
ty0 = labels[:,:,:,2]
tx1 = labels[:,:,:,3]
ty1 = labels[:,:,:,4]
tx2 = labels[:,:,:,5]
ty2 = labels[:,:,:,6]
tx3 = labels[:,:,:,7]
ty3 = labels[:,:,:,8]
tx4 = labels[:,:,:,9]
ty4 = labels[:,:,:,10]
tx5 = labels[:,:,:,11]
ty5 = labels[:,:,:,12]
tx6 = labels[:,:,:,13]
ty6 = labels[:,:,:,14]
tx7 = labels[:,:,:,15]
ty7 = labels[:,:,:,16]
tx8 = labels[:,:,:,17]
ty8 = labels[:,:,:,18]
nAnchors = nH*nW
nPixels = nH*nW
for b in range(nB):
cur_pre_corners = tf.transpose(pred_corners[b*nAnchors:(b+1)*nAnchors], (1,0)) # 18 X 169
gx0 = target[b][1] # a value, in percentage
gy0 = target[b][2]
gx1 = target[b][3]
gy1 = target[b][4]
gx2 = target[b][5]
gy2 = target[b][6]
gx3 = target[b][7]
gy3 = target[b][8]
gx4 = target[b][9]
gy4 = target[b][10]
gx5 = target[b][11]
gy5 = target[b][12]
gx6 = target[b][13]
gy6 = target[b][14]
gx7 = target[b][15]
gy7 = target[b][16]
gx8 = target[b][17]
gy8 = target[b][18]
cur_gt_corners = tf.transpose(tf.tile(tf.Variable([[gx0, gy0, gx1, gy1, gx2, gy2, gx3, gy3, gx4, gy4,\
gx5, gy5, gx6, gy6, gx7, gy7, gx8, gy8]], trainable=False), (nAnchors, 1)), (1, 0)) # 18 X 169
# compute current confidence value
cur_confs = tf.nn.relu(corner_confidences9(cur_gt_corners, cur_pre_corners)) # [169]
temp = tf.reshape(tf.cast(cur_confs < sil_thresh, tf.float32), (nH, nW)) * noobject_scale
conf_mask.append(temp) # a list
nGT = 0
nCorrect = 0
for b in range(nB):
nGT = nGT + 1
best_n = -1
gx0 = target[b][1] # tensor with shape (1,)
gy0 = target[b][2]
gx1 = target[b][3]
gy1 = target[b][4]
gx2 = target[b][5]
gy2 = target[b][6]
gx3 = target[b][7]
gy3 = target[b][8]
gx4 = target[b][9]
gy4 = target[b][10]
gx5 = target[b][11]
gy5 = target[b][12]
gx6 = target[b][13]
gy6 = target[b][14]
gx7 = target[b][15]
gy7 = target[b][16]
gx8 = target[b][17]
gy8 = target[b][18]
gi0, gj0 = get_max_index(response[b])
best_n = 0 # 1 anchor, single object
temp_location = response[b] # [nW, nH]
gt_box = tf.convert_to_tensor([gx0, gy0, gx1, gy1, gx2, gy2, gx3, gy3, gx4, gy4,\
gx5, gy5, gx6, gy6, gx7, gy7, gx8, gy8]) # (18, )
pred_box = pred_corners[b * nAnchors + gi0 * nW + gj0] # (18, )
conf = corner_confidence9(gt_box, pred_box) # (1, )
coord_mask.append(temp_location)
cls_mask.append(temp_location)
# conf_temp = np.ones([nH, nW])
conf_temp = temp_location * object_scale
conf_mask[b] = conf_mask[b] + conf_temp
# tconf[b][gj0][gi0] = conf
tconf.append(temp_location * conf)
# if conf > 0.5:
# nCorrect = nCorrect + 1
nCorrect = tf.cond(conf > 0.5, lambda: nCorrect + 1, lambda: nCorrect)
tconf = tf.convert_to_tensor(tconf)
conf_mask = tf.convert_to_tensor(conf_mask)
coord_mask = tf.convert_to_tensor(coord_mask)
cls_mask = tf.convert_to_tensor(cls_mask)
return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx0, tx1, tx2, tx3, tx4, tx5,\
tx6, tx7, tx8, ty0, ty1, ty2, ty3, ty4, ty5, ty6, ty7, ty8, tconf
def loss_layer(self, predicts, labels, scope='Loss_layer'):
"""
Args:
            predicts tensor: [batch_size, cell_size, cell_size, 19 + num_class]; 19 = 9 points' coords (18) + 1 confidence
                last dimension: coord(18) ==> classes(num_class) ==> confidence(1)
            labels tensor: [batch_size, cell_size, cell_size, 20 + num_class]; 20 = 9 points' coords (18) + 1 response + 1 confidence
                last dimension: response(1) ==> coord(18) ==> classes(num_class) ==> confidence(1)
"""
with tf.variable_scope(scope):
## Ground Truth
response = tf.reshape(labels[:, :, :, 0], [self.Batch_Size, self.cell_size, self.cell_size, 1])
gt_tensor = []
gt_idx = []
# get the responsible tensor's index
for i in range(self.Batch_Size):
gt_resp = tf.reshape(response[i], [self.cell_size, self.cell_size])
gt_i, gt_j = get_max_index(gt_resp)
temp_tensor = labels[i, gt_i, gt_j,:] # shape: [32,]
gt_tensor.append(temp_tensor)
gt_idx.append([gt_i, gt_j])
gt_tensor = tf.convert_to_tensor(gt_tensor) # shape: [batch, 32], store object tensors
gt_idx = tf.convert_to_tensor(gt_idx) # shape: [batch, 2]
#metric
labels_coord = gt_tensor[:, 1:self.boundry_1+1] # for later coord loss
labels_classes = gt_tensor[:, self.boundry_1+1: ] # for later class loss
gt_coords = labels[:, :, :, 1:self.boundry_1+1] # [batch, cell, cell, 18]
ground_true_boxes_x = tf.transpose(tf.stack([gt_coords[:,:,:,0], gt_coords[:,:,:,2], gt_coords[:,:,:,4], gt_coords[:,:,:,6],
gt_coords[:,:,:,8], gt_coords[:,:,:,10], gt_coords[:,:,:,12], gt_coords[:,:,:,14], gt_coords[:,:,:,16]]),
(1, 2, 3, 0)) # [Batch, cell, cell, 9], for later conf calculate
ground_true_boxes_y = tf.transpose(tf.stack([gt_coords[:,:,:,1], gt_coords[:,:,:,3], gt_coords[:,:,:,5], gt_coords[:,:,:,7],
gt_coords[:,:,:,9], gt_coords[:,:,:,11], gt_coords[:,:,:,13], gt_coords[:,:,:,15], gt_coords[:,:,:,17]]),
(1, 2, 3, 0)) # [Batch, cell, cell, 9], for later conf calculate
## Predicts
predict_conf = tf.reshape(predicts[:, :, :, 0], [self.Batch_Size, self.cell_size, self.cell_size, 1]) # get predicted confidence
predict_boxes_tr = tf.concat([tf.nn.sigmoid(predicts[:,:,:,1:3]), predicts[:,:,:,3:self.boundry_1+1]], 3)
# offset for predicts
off_set_x = np.tile(np.reshape(np.array([np.arange(13)] * 13 ), (13, 13, 1)), (1, 1, 9))
off_set_y = np.transpose(off_set_x, (1, 0, 2))
off_set_x = np.tile(np.transpose(np.reshape(off_set_x, (13, 13, 9, 1)), (3, 0, 1, 2)), (self.Batch_Size, 1, 1, 1)) # [Batch, cell, cell, 9]
off_set_y = np.tile(np.transpose(np.reshape(off_set_y, (13, 13, 9, 1)), (3, 0, 1, 2)), (self.Batch_Size, 1, 1, 1)) # [Batch, cell, cell, 9]
predict__x = tf.transpose(tf.stack([predict_boxes_tr[:,:,:,0], predict_boxes_tr[:,:,:,2], predict_boxes_tr[:,:,:,4],
predict_boxes_tr[:,:,:,6], predict_boxes_tr[:,:,:,8], predict_boxes_tr[:,:,:,10],
predict_boxes_tr[:,:,:,12], predict_boxes_tr[:,:,:,14], predict_boxes_tr[:,:,:,16]]),
(1,2,3,0)) # [Batch, cell, cell, 9]
predict__y = tf.transpose(tf.stack([predict_boxes_tr[:,:,:,1], predict_boxes_tr[:,:,:,3], predict_boxes_tr[:,:,:,5],
predict_boxes_tr[:,:,:,7], predict_boxes_tr[:,:,:,9], predict_boxes_tr[:,:,:,11],
predict_boxes_tr[:,:,:,13], predict_boxes_tr[:,:,:,15], predict_boxes_tr[:,:,:,17]]),
(1,2,3,0)) # [Batch, cell, cell, 9]
pred_box_x = predict__x + off_set_x # predict boxes x coordinates with offset, for later conf calculate
pred_box_y = predict__y + off_set_y # predict boxes y coordinates with offset, for later conf calculate
pred_boxes = tf.transpose(tf.stack([pred_box_x[:,:,:,0], pred_box_y[:,:,:,0],
pred_box_x[:,:,:,1], pred_box_y[:,:,:,1],
pred_box_x[:,:,:,2], pred_box_y[:,:,:,2],
pred_box_x[:,:,:,3], pred_box_y[:,:,:,3],
pred_box_x[:,:,:,4], pred_box_y[:,:,:,4],
pred_box_x[:,:,:,5], pred_box_y[:,:,:,5],
pred_box_x[:,:,:,6], pred_box_y[:,:,:,6],
pred_box_x[:,:,:,7], pred_box_y[:,:,:,7],
pred_box_x[:,:,:,8], pred_box_y[:,:,:,8]]), (1,2,3,0)) # predict coords [batch, cell, cell, 18]
pred_boxes = tf.concat([pred_boxes, predicts[:,:,:,19:]], 3) # [batch, cell, cell, 31], without confidence
pred_tensor = [] # restore tensor
# get the max confidence tensor and its index
for i in range(self.Batch_Size):
pred_conf = predict_conf[i]
pred_conf = tf.reshape(pred_conf, [self.cell_size, self.cell_size])
if self.obj_scale == 0.0:
# means in pre train
pred_i, pred_j = gt_idx[i, 0], gt_idx[i, 1]
else:
# in training
pred_i, pred_j = get_max_index(pred_conf)
temp_tensor = pred_boxes[i, pred_i, pred_j, :]
pred_tensor.append(temp_tensor)
pred_tensor = tf.convert_to_tensor(pred_tensor) # shape: [batch, 31], store tensors with max_confidence
# metric
predict_coord_tr = pred_tensor[:, :self.boundry_1] # for later coord loss
predict_classes = pred_tensor[:, self.boundry_1:] # for later class loss
## Calculate confidence (instead of IoU like in YOLOv2)
labels_conf = confidence9(pred_box_x, pred_box_y, ground_true_boxes_x, ground_true_boxes_y) # [batch, cell, cell, 1]
self.gt_conf = labels_conf
## Set coefs for loss
object_coef = tf.constant(self.obj_scale, dtype=tf.float32)
noobject_coef = tf.constant(self.noobj_scale, dtype=tf.float32)
conf_coef = tf.add(tf.ones_like(response)*noobject_coef, response*object_coef) # [batch, cell, cell, 1] with object:5.0, no object:0.1
coord_coef = tf.ones([self.Batch_Size, 1]) * self.coord_scale # [batch, 1]
class_coef = tf.ones([self.Batch_Size, 1]) * self.class_scale # [batch, 1]
## Compute losses
conf_loss = conf_mean_squared_error(predict_conf, labels_conf, weights=conf_coef)
coord_loss = coord_mean_squared_error(predict_coord_tr, labels_coord, weights=coord_coef)
class_loss = softmax_cross_entropy(predict_classes, labels_classes, weights=class_coef)
# class_loss = tf.losses.softmax_cross_entropy(labels_classes, predict_classes)
loss = conf_loss + coord_loss + class_loss
total_loss = []
total_loss.append(loss)
total_loss.append(conf_loss)
total_loss.append(coord_loss)
total_loss.append(class_loss)
return total_loss
def confidence_score(self, predicts, confidence):
"""
compute the class-specific confidence scores
see paper section 3.3
Args:
output tensor by net: [batch, cell_size, cell_size, 19+num_class]
"""
predict_classes = tf.reshape(predicts[:, :, :, 18:-1], [self.Batch_Size, self.cell_size, self.cell_size, self.num_class])
confidence = tf.tile(confidence, [1, 1, 1, self.num_class])
class_speci_conf_score = tf.multiply(predict_classes, confidence)
class_speci_conf_score = tf.reduce_mean(class_speci_conf_score, axis=3, keep_dims=True)
#class_speci_conf_score = tf.nn.sigmoid(class_speci_conf_score)
return class_speci_conf_score
def evaluation(self):
"""
        Switch the network to evaluation mode: turn off Batch Norm (and Dropout).
"""
self.is_training = False
self.Batch_Norm = False
def evaluation_off(self):
self.is_training = True
self.Batch_Norm = True | Mmmofan/YOLO_6D | yolo_6d.py | yolo_6d.py | py | 27,080 | python | en | code | 54 | github-code | 50 |
4652076539 | from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from ..base import (
fire_form_callbacks,
get_theme,
run_form_handlers,
submit_plugin_form_data,
)
from ..constants import (
CALLBACK_BEFORE_FORM_VALIDATION,
CALLBACK_FORM_INVALID,
CALLBACK_FORM_VALID,
CALLBACK_FORM_VALID_AFTER_FORM_HANDLERS,
CALLBACK_FORM_VALID_BEFORE_SUBMIT_PLUGIN_FORM_DATA,
)
from ..dynamic import assemble_form_class
from ..exceptions import ImproperlyConfigured
from ..settings import GET_PARAM_INITIAL_DATA
__title__ = "fobi.integration.processors"
__author__ = "Artur Barseghyan <artur.barseghyan@gmail.com>"
__copyright__ = "2014-2019 Artur Barseghyan"
__license__ = "GPL 2.0/LGPL 2.1"
__all__ = ("IntegrationProcessor",)
class IntegrationProcessor(object):
"""Generic integration processor.
:param str form_sent_get_param:
:param bool can_redirect: If set to True, if not authenticated
an attempt to redirect user to a login page would be made. Otherwise,
a message about authentication would be generated instead (in place
of the form). Some content management systems, like Django-CMS, aren't
able to redirect on plugin level. For those systems, the value
of ``can_redirect`` should be set to False.
:param str login_required_template_name: Template to be used for
rendering the login required message. This is only important when
``login_required_redirect`` is set to False.
"""
can_redirect = True
form_sent_get_param = "sent"
login_required_template_name = "fobi/integration/login_required.html"
def integration_check(self, instance):
"""Integration check.
Performs a simple check to identify whether the model instance
has been implemented according to the expectations.
"""
expected_fields = (
("form_entry", 'models.ForeignKey("fobi.FormEntry)'),
("form_template_name", "models.CharField"),
("hide_form_title", "models.BooleanField"),
("form_title", "models.CharField"),
("form_submit_button_text", "models.CharField"),
("success_page_template_name", "models.CharField"),
("hide_success_page_title", "models.BooleanField"),
("success_page_title", "models.CharField"),
("success_page_text", "models.CharField"),
)
for field_name, field_info in expected_fields:
if not hasattr(instance, field_name):
raise ImproperlyConfigured(
"You should have a field {0} in your {1} model "
"({2})".format(field_name, field_info, type(instance))
)
def get_context_data(self, request, instance, **kwargs):
"""Get context data."""
context = {
"form_entry": instance.form_entry,
}
context.update(kwargs)
return context
def get_form_template_name(self, request, instance):
"""Get form template name."""
return instance.form_template_name or None
def get_success_page_template_name(self, request, instance):
"""Get succes page template name."""
return instance.success_page_template_name or None
def get_login_required_template_name(self, request, instance):
"""Get login required template name."""
return self.login_required_template_name or None
def get_process_form_redirect_url(self, request, instance):
"""Get process form redirect URL (success).
:param django.http.HttpRequest request:
:param fobi.models.FormEntry instance:
:return str:
"""
return "{0}?{1}={2}".format(
request.path, self.form_sent_get_param, instance.form_entry.slug
)
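    # For example (path and slug are hypothetical), with the default
    # ``form_sent_get_param = "sent"`` this yields: /my-page/?sent=contact-form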
def _process_form(self, request, instance, **kwargs):
"""Process form.
Handle the form if no "sent" GET argument (see the
``WIDGET_FORM_SENT_GET_PARAM`` setting).
:param django.http.HttpRequest request:
:param fobi.models.FormEntry instance: FormEntry instance.
:return django.http.HttpResponse | str:
"""
template_name = self.get_form_template_name(request, instance)
user_is_authenticated = request.user.is_authenticated
# Handle public/non-public forms. If form requires user authentication
# redirect to login form with next parameter set to current request
# path.
if not user_is_authenticated and not instance.form_entry.is_public:
if self.can_redirect:
return redirect(
"{0}?next={1}".format(settings.LOGIN_URL, request.path)
)
else:
return self._show_login_required_page(
request, instance, **kwargs
)
form_element_entries = instance.form_entry.formelemententry_set.all()[:]
# This is where the most of the magic happens. Our form is being built
# dynamically.
form_cls = assemble_form_class(
instance.form_entry,
form_element_entries=form_element_entries,
request=request,
)
if request.method == "POST":
form = form_cls(request.POST, request.FILES)
# Fire pre form validation callbacks
fire_form_callbacks(
form_entry=instance.form_entry,
request=request,
form=form,
stage=CALLBACK_BEFORE_FORM_VALIDATION,
)
if form.is_valid():
# Fire form valid callbacks, before handling submitted plugin
# form data
form = fire_form_callbacks(
form_entry=instance.form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_BEFORE_SUBMIT_PLUGIN_FORM_DATA,
)
# Fire plugin processors
form = submit_plugin_form_data(
form_entry=instance.form_entry, request=request, form=form
)
# Fire form valid callbacks
form = fire_form_callbacks(
form_entry=instance.form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID,
)
# Run all handlers
run_form_handlers(
form_entry=instance.form_entry, request=request, form=form
)
# Fire post handler callbacks
fire_form_callbacks(
form_entry=instance.form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_AFTER_FORM_HANDLERS,
)
messages.info(
request,
_("Form {0} was submitted " "successfully.").format(
instance.form_entry.name
),
)
if self.can_redirect:
return redirect(
self.get_process_form_redirect_url(request, instance)
)
else:
return self._show_thanks_page(request, instance, **kwargs)
else:
# Fire post form validation callbacks
fire_form_callbacks(
form_entry=instance.form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_INVALID,
)
else:
# Providing initial form data by feeding entire GET dictionary
# to the form, if ``GET_PARAM_INITIAL_DATA`` is present in the
# GET.
kwargs = {}
if GET_PARAM_INITIAL_DATA in request.GET:
kwargs = {"initial": request.GET}
form = form_cls(**kwargs)
theme = get_theme(request=request, as_instance=True)
theme.collect_plugin_media(form_element_entries)
form_title = (
instance.form_title
if instance.form_title
else instance.form_entry.title
)
context = self.get_context_data(
request=request,
instance=instance,
form=form,
fobi_theme=theme,
fobi_form_title=form_title,
fobi_hide_form_title=instance.hide_form_title,
fobi_form_submit_button_text=instance.form_submit_button_text,
)
if not template_name:
template_name = theme.view_embed_form_entry_ajax_template
render_kwargs = {
"template_name": template_name,
"context": context,
"request": request,
}
self.rendered_output = render_to_string(**render_kwargs)
def _show_login_required_page(self, request, instance, **kwargs):
"""Displays text with login required.
:param django.http.HttpRequest request:
:return django.http.HttpResponse | str:
"""
context = self.get_context_data(
request=request,
instance=instance,
login_url="{0}?next={1}".format(settings.LOGIN_URL, request.path),
)
template_name = self.get_login_required_template_name(request, instance)
render_kwargs = {
"template_name": template_name,
"context": context,
"request": request,
}
return render_to_string(**render_kwargs)
def _show_thanks_page(self, request, instance, **kwargs):
"""Render the thanks page after successful form submission.
:param django.http.HttpRequest request:
:param fobi.models.FormEntry instance: FormEntry instance.
:return str:
"""
template_name = self.get_success_page_template_name(request, instance)
theme = get_theme(request=request, as_instance=True)
context = self.get_context_data(
request=request,
instance=instance,
fobi_theme=theme,
fobi_hide_success_page_title=instance.hide_success_page_title,
fobi_success_page_title=instance.success_page_title,
fobi_success_page_text=instance.success_page_text,
)
if not template_name:
template_name = theme.embed_form_entry_submitted_ajax_template
render_kwargs = {
"template_name": template_name,
"context": context,
"request": request,
}
self.rendered_output = render_to_string(**render_kwargs)
def _process(self, request, instance, **kwargs):
"""This is where most of the form handling happens.
:param django.http.HttpRequest request:
:param fobi.models.FormEntry instance: FormEntry instance.
:return django.http.HttpResponse | str:
"""
self.integration_check(instance)
if self.form_sent_get_param in request.GET:
return self._show_thanks_page(request, instance, **kwargs)
else:
return self._process_form(request, instance, **kwargs)
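# Minimal illustrative subclass (the class name is an assumption, not part of
# fobi): an in-place processor for systems that cannot redirect, per the
# ``can_redirect`` notes in the class docstring above.
class ExampleInPlaceProcessor(IntegrationProcessor):
    """Sketch: integration processor for CMS plugins that render in place."""
    can_redirect = False
    form_sent_get_param = "form_sent"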
| barseghyanartur/django-fobi | src/fobi/integration/processors.py | processors.py | py | 11,441 | python | en | code | 474 | github-code | 50 |
32820225812 | # -*- coding: utf-8 -*-
N = int(input())
i = o = 0
for _ in range(1, N+1):
tmp = int(input())
if tmp in range(10, 21):
i += 1
else:
o += 1
print(i,'in')
print(o,'out')
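# Example run (values are hypothetical): for N=3 with inputs 14, 5, 20 the
# program prints "2 in" and "1 out" (values from 10 to 20 inclusive count as "in").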
| carlos3g/URI-solutions | categorias/iniciante/python/1072.py | 1072.py | py | 197 | python | en | code | 1 | github-code | 50 |
13281671821 | import numpy as np
import matplotlib.pyplot as plt
import cv2
import pdb
from skimage import data, color
from skimage.transform import hough_circle
from skimage.transform import hough_ellipse
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.draw import ellipse_perimeter
from skimage.util import img_as_ubyte
from skimage import exposure
def getpupil(infile):
"""
    Uses scikit-image's circular Hough transform to find the pupil circle in the image
"""
# Test image setup
#img_dir = '/home/kikimei/Insight/esp/'
#infile = img_dir + 'openeyes/Zorica_Radovic_0001_R.jpg'
# Read image
img = cv2.imread(infile)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img_as_ubyte(gray)
#Add Gaussian Blurr to smooth out features a bit
# Use threshold based on adaptive mean values
thresh = cv2.adaptiveThreshold(img, 200, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 11, 5)
imgblur = cv2.GaussianBlur(img, (3,3), 5)
# Filter the "median" values i.e. skin tone so that the pupil and
# eye area are both showing light/dark lines as "dark" rather than
# being dual values at the extreme high/low
imgmed = np.median(img)
imginit = img
imgfilt = img
imgfilt[img>imgmed] = (imgmed/(255-imgmed))*(-1*img[img>imgmed] + 255)
imgfilt = imgfilt
img = imgfilt
# img = imgblur
pdb.set_trace()
#Get image params
imgw = (img.shape)[0]
imgh = (img.shape)[1]
# Test image
#img = img_as_ubyte(data.coins()[0:95, 70:370])
# Detected edges in the image
edges = canny(img, sigma=3, low_threshold=0.5, high_threshold=0.8)
#edges = canny(img, sigma=3, low_threshold=20)
fig = plt.figure(1)
plt.clf()
ax = fig.gca()
# Search for radii starting at 3 pixels and moving to 1/3 the
# eye-area size in steps of 2 pixels
hough_radii = np.arange(3, imgw/4, 1)
hough_res = hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
colorimg = color.gray2rgb(imginit)
plt.imshow(img,cmap='gray')
plt.show()
#pdb.set_trace()
# Get two circles per radius search
for radius, h in zip(hough_radii, hough_res):
num_peaks = 2
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:,0], peaks[:,1]])
pdb.set_trace()
radii.extend([radius]*num_peaks)
#Plot all circles found
if len(peaks) != 0:
cx, cy = circle_perimeter( (peaks[0])[0], (peaks[0])[1], radius)
#limit to image range
cx[cx >= imgw] = imgw-1
cx[cx < 0] = 0
cy[cy >= imgh] = imgh-1
cy[cy < 0] = 0
#pdb.set_trace()
colorimg[cx, cy] = (220,20,20)
cx, cy = circle_perimeter( (peaks[1])[0], (peaks[1])[1], radius)
cx[cx >= imgw] = imgw-1
cx[cx < 0] = 0
cy[cy >= imgh] = imgh-1
cy[cy < 0] = 0
colorimg[cx, cy] = (220,20,20)
ax.imshow(colorimg, cmap='gray')
plt.show()
#pdb.set_trace()
#pdb.set_trace()
fig2 = plt.figure(2)
plt.clf()
colorimg_top = color.gray2rgb(img)
plt.imshow(colorimg_top, cmap='gray')
ax2 = fig2.gca()
# Plot top circles
# ::-1 reverses the order
# :5 takes 0--5th position = top 5 in order of accums = peak info
if len(accums) != 0:
for idx in np.argsort(accums)[::-1][:1]:
center_x, center_y = centers[idx]
radius = radii[idx]
cx, cy = circle_perimeter(center_x, center_y, radius)
cx[cx >= imgw] = imgw-1
cx[cx < 0] = 0
cy[cy >= imgh] = imgh-1
cy[cy < 0] = 0
colorimg_top[cx, cy] = (220,20,20)
ax2.imshow(colorimg_top, cmap='gray')
plt.show()
# Most strong circle ==> pupil
ind_top = np.argsort(accums)[::-1][:1]
pcenter_x, pcenter_y = centers[ind_top]
pradius = radii[ind_top]
pheight = accums[ind_top]
pdb.set_trace()
return [pcenter_x, pcenter_y, pradius, pheight]
else:
return [0, 0, 0, 0]
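# Illustrative call (the path is hypothetical): getpupil returns the strongest
# circle as [center_x, center_y, radius, accumulator_height], or [0, 0, 0, 0]
# if no circle peaks were found.
#   cx, cy, r, strength = getpupil('openeyes/sample_eye.jpg')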
def geteye(infile):
"""
    Uses scikit-image's elliptical Hough transform to find eye ellipses in the image
"""
# -------------------------------------------
# SETUP PARAMETERS
threshhough = 4
accuracy = 1
# angle for orientation away from horizonal < 15deg
maxangle = 15
# SETUP EYE IMAGE INPUTS
#img_dir = '/home/kikimei/Insight/esp/'
#infile = img_dir + 'haartraining/negative_images/Zorica_Radovic_0001.jpg'
# Read image
img = cv2.imread(infile)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#img = img_as_ubyte(gray)
img = gray
pdb.set_trace()
#infile = img_dir + 'closedeyes/Zorica_Radovic_0001_L.jpg'
#pdb.set_trace()
#img = cv2.imread(infile)
#np2, np98 = np.percentile(img, (2, 98))
#logimg = exposure.adjust_log(img, 1)
#conimg = exposure.rescale_intensity(img, in_range=(np2, np98))
#pdb.set_trace()
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#ret, thresh = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)
thresh = cv2.adaptiveThreshold(img, 200, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 11, 5)
imgblur = cv2.GaussianBlur(thresh, (3,3), 20)
# Filter the "median" values i.e. skin tone so that the pupil and
# eye area are both showing light/dark lines as "dark" rather than
# being dual values at the extreme high/low
imgmed = np.median(img)
imginit = img
imgfilt = img
imgfilt[img>imgmed] = (imgmed/(255-imgmed))*(-1*img[img>imgmed] + 255)
img = imgfilt
plt.imshow(img, cmap='gray')
plt.show()
pdb.set_trace()
#vimage = color.rgb2hsv(img)
#gray = vimage[:,:,2]
pdb.set_trace()
imgw = (img.shape)[0]
imgh = (img.shape)[1]
#pdb.set_trace()
#This is the test image
test = 0
if test == 1:
img = img_as_ubyte(data.coffee()[0:220, 160:420])
img = color.rgb2gray(img)
imgw = (img.shape)[0]
imgh = (img.shape)[1]
# Detected edges in the image
edges = canny(img, sigma=2, low_threshold=0.5, high_threshold=0.8)
#pdb.set_trace()
hough_radii = np.arange(3, imgw/2, 2)
pdb.set_trace()
if test == 1:
hough_res = hough_ellipse(edges, threshold=250,
accuracy=20,
min_size=100, max_size=120)
else:
hough_res = hough_ellipse(edges, threshold=threshhough,
accuracy=accuracy,
min_size=np.min(hough_radii),
max_size=np.max(hough_radii))
# Sort all ellipses found by order of "strength"
# require the angle to be within +/- maxangle of horizontal
# require width > height
idx = np.where((hough_res['a'] > 0) &
(hough_res['b'] > 0) &
(hough_res['a'] >= hough_res['b']) &
(abs(hough_res['orientation']*180/np.pi - 90) < maxangle))
hough_res = hough_res[idx]
hough_res.sort(order='accumulator')
best_ellipse = list(hough_res[0])
# Make the first plot
fig2 = plt.figure(2)
plt.clf()
colorimg_top = color.gray2rgb(imginit)
plt.imshow(colorimg_top, cmap='gray')
ax2 = fig2.gca()
# Top Ellipse parameters need to be rounded to integers for images
ellx, elly, ella, ellb = [int(round(par)) for par in best_ellipse[1:5]]
orien = best_ellipse[5]
# setup ellipse params
cx, cy = ellipse_perimeter(ellx, elly, ella, ellb, orien)
cx[cx >= imgw] = imgw-1
cx[cx < 0] = 0
cy[cy >= imgh] = imgh-1
cy[cy < 0] = 0
#pdb.set_trace()
colorimg_top[cx,cy] = (220,100,20)
best_params = np.array([ellx, elly, ella, ellb, orien])
ax2.imshow(colorimg_top, cmap='gray')
plt.show()
pdb.set_trace()
for hellipse in hough_res:
#pdb.set_trace()
hellipse_l = list(hellipse)
xc, yc, ac, ab = [int(round(par)) for par in hellipse_l[1:5]]
orientation = hellipse_l[5]
cx, cy = ellipse_perimeter(xc, yc, ac, ab, orientation)
cx[cx >= imgw] = imgw-1
cx[cx < 0] = 0
cy[cy >= imgh] = imgh-1
cy[cy < 0] = 0
colorimg_top[cx, cy] = (0,200,0)
#ax2.imshow(colorimg_top, cmap='gray')
#plt.show()
#pdb.set_trace()
return best_params
pdb.set_trace()
def check_pupils(inlist, outfile):
"""
"""
# Get input list of all eyes
eyefiles = np.genfromtxt(inlist, dtype=None)
for eyefile in eyefiles:
pupilres = getpupil(eyefile)
#eyeres = geteye(eyefile)
        if (abs(pupilres[0] - 12) < 3) & (abs(pupilres[1] - 12) < 3):
pupil_mid = 1
else:
pupil_mid = 0
pupilres = [pupilres[0], pupilres[1], pupilres[2], pupilres[3], pupil_mid]
if 'pupil_arr' in vars():
pupil_arr = np.vstack((pupil_arr, np.array([pupilres])))
else:
pupil_arr = np.array([pupilres])
print(pupilres)
#pdb.set_trace()
np.savetxt(outfile, pupil_arr, delimiter=',', fmt='%10.5f')
pdb.set_trace()
def check_eyes(inlist, outfile):
# Get input list of all eyes
eyefiles = np.genfromtxt(inlist, dtype=None)
for eyefile in eyefiles:
#pupilres = getpupil(eyefile)
pupilres = geteye(eyefile)
        if (abs(pupilres[0] - 12) < 3) & (abs(pupilres[1] - 12) < 3):
pupil_mid = 1
else:
pupil_mid = 0
pupilres = [pupilres[0], pupilres[1], pupilres[2], pupilres[3], pupil_mid]
if 'pupil_arr' in vars():
pupil_arr = np.vstack((pupil_arr, np.array([pupilres])))
else:
pupil_arr = np.array([pupilres])
print(pupilres)
pdb.set_trace()
np.savetxt(outfile, pupil_arr, delimiter=',', fmt='%10.5f')
pdb.set_trace()
def simple_pupils(inlist, outfile):
"""
"""
# Get input list of all eyes
eyefiles = np.genfromtxt(inlist, dtype=None)
for eyefile in eyefiles:
img = cv2.imread(eyefile)
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ysum = np.sum(img, axis=1)
xsum = np.sum(img, axis=0)
        pupilres = getpupil(eyefile)
        if (abs(pupilres[0] - 12) < 3) & (abs(pupilres[1] - 12) < 3):
pupil_mid = 1
else:
pupil_mid = 0
pupilres = [pupilres[0], pupilres[1], pupilres[2], pupilres[3], pupil_mid]
if 'pupil_arr' in vars():
pupil_arr = np.vstack((pupil_arr, np.array([pupilres])))
else:
pupil_arr = np.array([pupilres])
print(pupilres)
#pdb.set_trace()
np.savetxt(outfile, pupil_arr, delimiter=',', fmt='%10.5f')
pdb.set_trace()
| kimberly-aller/esp-insight | pro/findellipse.py | findellipse.py | py | 11,238 | python | en | code | 0 | github-code | 50 |
7658514654 | from django.conf.urls import include, url
from django.contrib import admin
from guangshuai_test.views import *
urlpatterns = [
# Examples:
# url(r'^$', 'idctools.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^guangshuai_test/',include('guangshuai_test.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$',index),
url(r'^guangshuai_result/$',guangshuai_result),
url(r'^test/$',test),
url(r'^test2/$',test2),
url(r'^module_number',module_number),
url(r'^port_channel',port_channel),
url(r'^ajax',data_ajax),
url(r'^ping_monitor',ping_threading),
url(r'^once_check',once_check),
]
| sdgdsffdsfff/idctools | idctools/urls.py | urls.py | py | 672 | python | en | code | 0 | github-code | 50 |
546654986 | # There are a total of numCourses courses you have to take, labeled from 0 to numCourses - 1. You are given an array prerequisites where prerequisites[i] = [ai, bi] indicates that you must take course bi first if you want to take course ai.
# For example, the pair [0, 1], indicates that to take course 0 you have to first take course 1.
# Return true if you can finish all courses. Otherwise, return false.
# Idea is to create an adjacency list and run a dfs on it
# Adjacency list is stored as preMap dictionary with list as values
# Create visited set to keep track of nodes visited
# In dfs function, return False if course has been visited (indicates cycle)
# return True if particular course does not have any prerequisites
# In each dfs run, loop through the values in particular key of preMap
# Returns false if dfs(prereq) is False
# Remove course from visited
# Set preMap[course] to empty list to show that it has been visited
# Else return True
# Call dfs function on each course
from typing import List
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
# Create adjacency list of courses and their prerequisites
preMap = {i:[] for i in range(numCourses)}
for course, prereq in prerequisites:
preMap[course].append(prereq)
# Create visited set to store courses that have been visited
visited = set()
def dfs(course):
# Means there is a cycle
if course in visited:
return False
if preMap[course] == []:
return True
visited.add(course)
for pre in preMap[course]:
if not dfs(pre): # If dfs(pre) returns False, then function returns False
return False
visited.remove(course)
preMap[course] = [] # Set particular course to empty list since we would already know that course can be completed
return True
for crs in range(numCourses):
if not dfs(crs):
return False
return True
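# Illustrative usage (inputs are hypothetical):
#   Solution().canFinish(2, [[1, 0]])           # True: take course 0, then course 1
#   Solution().canFinish(2, [[1, 0], [0, 1]])   # False: courses 0 and 1 form a cycle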
| tieonster/leetcode | Graphs/Questions/courseSchedule.py | courseSchedule.py | py | 2,195 | python | en | code | 0 | github-code | 50 |
6114938846 | from email.mime.application import MIMEApplication
from flask import Flask, request, jsonify, session
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager, create_access_token, jwt_required, get_jwt_identity
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from celery import Celery
from datetime import datetime, timedelta
import redis
from sqlalchemy import or_
import pandas as pd
from os.path import basename
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "http://localhost:5173"}})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///movie.db'
app.config['SECRET_KEY'] = 'your_secret_key_here' # Replace with your own secret key
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=30) # Session lifetime
db = SQLAlchemy(app)
jwt = JWTManager(app)
# Initialize Redis connection
redis_db = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
class user(db.Model):
userid = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
username = db.Column(db.String(50), unique=True, nullable=False)
useremail = db.Column(db.String(120), unique=True, nullable=False)
userpassword = db.Column(db.String(128), nullable=False)
admin = db.Column(db.Boolean, default=False)
def __repr__(self):
return f"<User {self.username}>"
class venue(db.Model):
venueid = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
venuename = db.Column(db.String(100), nullable=False)
venueplace = db.Column(db.String(100), nullable=False)
venuelocation = db.Column(db.String(100), nullable=False)
venuecapacity = db.Column(db.Integer, nullable=False)
venueshows = db.relationship('venueshow', backref='venue', lazy=True)
class show(db.Model):
showid = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
showname = db.Column(db.String(100), nullable=False)
showrating = db.Column(db.Integer, nullable=False)
showtags = db.Column(db.String(100), nullable=False)
class venueshow(db.Model):
venueshowid = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
venueid = db.Column(db.Integer, db.ForeignKey('venue.venueid'), nullable=False)
showid = db.Column(db.Integer, db.ForeignKey('show.showid'), nullable=False)
showdate = db.Column(db.String(100), nullable=False)
timing = db.Column(db.String(100), nullable=False)
price = db.Column(db.Integer, nullable=False)
bookedseats= db.Column(db.Integer, default=0)
# Define the relationship with the 'show' model
show = db.relationship('show', backref='venueshows', lazy=True)
class booking(db.Model):
bookingid = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
showid = db.Column(db.Integer, db.ForeignKey('show.showid'), nullable=False)
venueid = db.Column(db.Integer, db.ForeignKey('venue.venueid'), nullable=False)
userid = db.Column(db.Integer, db.ForeignKey('user.userid'), nullable=False)
    seats = db.Column(db.Integer, nullable=False)
    rating = db.Column(db.Integer, nullable=True)  # set via the /api/<booking_id>/<rating> route below
class statushistory(db.Model):
    # Tracks each user's last login time and online status (1 = online, 0 = offline); userid is the primary key
userid=db.Column(db.Integer,primary_key=True,unique=True,nullable=False)
lastlogin=db.Column(db.DateTime,nullable=False)
status=db.Column(db.Integer,default=0)
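# One-time schema creation for the models above (a sketch; run once before
# serving requests, e.g. from a Python shell):
#   with app.app_context():
#       db.create_all()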
@app.route('/api/register', methods=['POST'])
def register():
data = request.json
username = data.get('username')
email = data.get('email')
password = data.get('password')
if not email or not password:
return jsonify({"error": "Please provide both email and password."}), 400
existing_user = user.query.filter_by(useremail=email).first()
if existing_user:
return jsonify({"error": "Email already registered. Please use a different email."}), 400
hashed_password = generate_password_hash(password)
new_user = user(username=username, useremail=email, userpassword=hashed_password)
db.session.add(new_user)
db.session.commit()
#take the userid as output and give email as input
adduser=user.query.filter_by(useremail=email).first()
print(adduser.userid)
activeuser=statushistory(userid=adduser.userid,lastlogin=datetime.now(),status=0)
print(new_user)
db.session.add(activeuser)
db.session.commit()
# Create and return the access token for the new user
access_token = create_access_token(identity=new_user.userid)
return jsonify({"message": "Registration successful!", "access_token": access_token}), 201
@app.route('/api/login', methods=['POST'])
def login():
data = request.json
email = data.get('email')
password = data.get('password')
if not email or not password:
return jsonify({"error": "Please provide both email and password."}), 400
    user_check = user.query.filter_by(useremail=email).first()
    if not user_check or not check_password_hash(user_check.userpassword, password):
        return jsonify({"error": "Invalid credentials."}), 401
    # Mark the user online and record the login time only after successful authentication
    existing_status_record = db.session.query(statushistory).filter(statushistory.userid == user_check.userid).first()
    existing_status_record.status = 1
    existing_status_record.lastlogin = datetime.now()
    db.session.commit()
# Authentication successful, create and return the access token
access_token = create_access_token(identity=user_check.userid)
# Store access code in Redis
redis_db.set(user_check.userid, access_token)
# Start a session for the user
session.permanent = True
session['user_id'] = user_check.userid
return jsonify({"access_token": access_token,
"userid":user_check.userid}), 200
@app.route('/api/login-admin', methods=['POST'])
def login_admin():
data = request.json
print(data)
email = data.get('email')
password = data.get('password')
if not email or not password:
return jsonify({"error": "Please provide both email and password."}), 400
user_check = user.query.filter_by(useremail=email).first()
    if not user_check or not check_password_hash(user_check.userpassword, password):
return jsonify({"error": "Invalid credentials."}), 401
if not user_check.admin:
return jsonify({"error": "You are not authorized to access this page."}), 403
# Authentication successful, create and return the access token
access_token = create_access_token(identity=user_check.userid)
redis_db.set(user_check.userid, access_token)
# Start a session for the user
session.permanent = True
session['user_id'] = user_check.userid
return jsonify({"access_token": access_token,
"userid":user_check.userid}), 200
@app.route('/api/logout', methods=['POST'])
def logout():
if 'user_id' in session:
user_id = session['user_id']
# Remove access code from Redis
redis_db.delete(user_id)
# Clear the session
session.pop('user_id', None)
return jsonify({'message': 'Logged out successfully'})
else:
return jsonify({'message': 'User is not logged in'})
@app.route('/api/reset', methods=['POST'])
def reset():
data = request.json
print(data)
email = data.get('email')
password = data.get('password')
if not email or not password:
return jsonify({"error": "Please provide both email and password."}), 400
user_check = user.query.filter_by(useremail=email).first()
if not user_check:
return jsonify({"error": "Invalid credentials."}), 401
hashed_password = generate_password_hash(password)
user_check.userpassword = hashed_password
db.session.commit()
# Authentication successful, create and return the access token
access_token = create_access_token(identity=user_check.userid)
return jsonify({"access_token": access_token}), 200
@app.route('/api/create-venue', methods=['POST'])
def create_venue():
data = request.json
name = data.get('name')
place = data.get('place')
location = data.get('location')
capacity = data.get('capacity')
print(data)
if not name or not place or not location or not capacity:
return jsonify({"error": "Please provide all the required information."}), 400
new_venue = venue(venuename=name, venueplace=place, venuelocation=location, venuecapacity=capacity)
db.session.add(new_venue)
db.session.commit()
return jsonify({"message": "Venue created successfully!"}), 201
@app.route('/api/create-show', methods=['POST'])
def create_show():
data = request.json
name = data.get('name')
rating = data.get('rating')
timing = data.get('timing')
tags= data.get('tags')
if not name or not rating:
return jsonify({"error": "Please provide both name and rating."}), 400
new_show = show(showname=name, showrating=rating, showtags=tags)
db.session.add(new_show)
db.session.commit()
return jsonify({"message": "Show created successfully!"}), 201
@app.route('/api/create-venueshow', methods=['POST'])
def create_venueshow():
data = request.json
venue_id = data.get('venueid')
show_id = data.get('showid')
date_str = data.get('ldate') # Update the key to 'ldate'
timings = data.get('timing')
price = data.get('price')
print(data)
if not venue_id or not show_id or not date_str or not timings or not price:
return jsonify({"error": "Please provide all the required information."}), 400
new_venueshow = venueshow(venueid=venue_id, showid=show_id, showdate=date_str, timing=timings, price=price)
try:
db.session.add(new_venueshow)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
return jsonify({"error": "Failed to create the show. Please try again later."}), 500
return jsonify({"message": "Show created successfully!"}), 201
@app.route('/api/shows', methods=['GET'])
def get_shows():
shows = show.query.all()
show_list = []
for movie in shows:
show_data = {
'showid': movie.showid,
'showname': movie.showname,
'showrating': movie.showrating,
'showtags': movie.showtags
}
show_list.append(show_data)
return jsonify(show_list)
@app.route('/api/venues-with-shows', methods=['GET'])
def get_venues_with_shows():
try:
# Fetch venue details along with shows using appropriate database queries
venues_with_shows = []
venues = venue.query.all()
for theatre in venues:
venue_data = {
'id': theatre.venueid,
'name': theatre.venuename,
'shows': []
}
for venue_show in theatre.venueshows: # Accessing the correct back-reference name
show_data = {
'id': venue_show.showid,
'movieName': venue_show.show.showname, # Access the 'showname' through the relationship
'date': venue_show.showdate,
'location': theatre.venuelocation
}
venue_data['shows'].append(show_data)
venues_with_shows.append(venue_data)
return jsonify(venues_with_shows), 200
except Exception as e:
print(e)
return jsonify({"error": "Failed to fetch venues with shows."}), 500
@app.route('/api/delete-show/<int:venue_id>/<int:show_id>', methods=['DELETE'])
def delete_show(venue_id, show_id):
try:
screen = venueshow.query.filter_by(showid=show_id, venueid=venue_id).first()
if screen:
db.session.delete(screen)
db.session.commit()
return jsonify({'message': f'Show with ID {show_id} deleted from venue with ID {venue_id}'}), 200
else:
return jsonify({'error': 'Show not found'}), 404
except Exception as e:
return jsonify({'error': str(e)}), 500
@app.route('/api/delete-venue/<int:venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
try:
print(venue_id)
venues = venueshow.query.filter_by(venueid=venue_id).all()
if venues:
# Delete each venueshow record associated with the venue_id
for venue_show in venues:
db.session.delete(venue_show)
            db.session.commit()
# Delete the main venue record
mainvenue = venue.query.get(venue_id)
if mainvenue:
db.session.delete(mainvenue)
db.session.commit()
return jsonify({'message': f'Main Venue with ID {venue_id} deleted successfully'}), 200
else:
return jsonify({'error': 'Main Venue not found'}), 404
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/api/show/<int:show_id>', methods=['GET'])
def get_show_details(show_id):
try:
Show = show.query.get(show_id)
if not Show:
return jsonify({"error": "Show not found"}), 404
show_data = {
"id": Show.showid,
"name": Show.showname,
"rating": Show.showrating,
"tags": Show.showtags
}
print(show_data)
return jsonify(show_data), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/api/update-show/<int:show_id>', methods=['PUT'])
def update_show(show_id):
try:
Show = show.query.get(show_id)
if not Show:
return jsonify({"error": "Show not found"}), 404
data = request.json
Show.showname = data.get('name', Show.showname)
Show.showrating = data.get('rating', Show.showrating)
Show.showtags = data.get('tags', Show.showtags)
db.session.commit()
return jsonify({"message": "Show updated successfully"}), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/api/venue/<int:venue_id>', methods=['GET'])
def get_venue_details(venue_id):
try:
venue_record = venue.query.get(venue_id)
if not venue_record:
return jsonify({"error": "Venue not found"}), 404
venue_data = {
"id": venue_record.venueid,
"venuename": venue_record.venuename,
"venueplace": venue_record.venueplace,
"venuelocation": venue_record.venuelocation,
"venuecapacity": venue_record.venuecapacity
}
print(venue_data)
return jsonify(venue_data), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/api/edit-venue/<int:venue_id>', methods=['PUT'])
def edit_venue(venue_id):
try:
venue_record = venue.query.get(venue_id)
if not venue_record:
return jsonify({"error": "Venue not found"}), 404
data = request.get_json()
venue_record.venuename = data.get('name', venue_record.venuename)
venue_record.venueplace = data.get('place', venue_record.venueplace)
venue_record.venuelocation = data.get('location', venue_record.venuelocation)
venue_record.venuecapacity = data.get('capacity', venue_record.venuecapacity)
db.session.commit()
return jsonify({'message': f'Venue with ID {venue_id} updated successfully'}), 200
except Exception as e:
return jsonify({'error': str(e)}), 500
@app.route('/api/booking-analytics', methods=['GET'])
def get_booking_analytics():
# Query to count the number of venueshows for each show (movie)
show_theater_counts = db.session.query(venueshow.showid, db.func.count(venueshow.venueshowid)).\
group_by(venueshow.showid).all()
# Convert the result into a dictionary
movie_theaters = {show_id: count for show_id, count in show_theater_counts}
# Query all movies from the 'show' table
movies = show.query.all()
# Prepare the response data
movie_data = [{"name": movie.showname, "theaters": movie_theaters.get(movie.showid, 0)} for movie in movies]
booking_analytics_data = {"movies": movie_data}
return jsonify(booking_analytics_data)
@app.route('/api/genres', methods=['GET'])
def get_genres():
genres_query = db.session.query(show.showtags.distinct()).all()
genres = [{"id": i + 1, "name": genre[0]} for i, genre in enumerate(genres_query)]
return jsonify(genres)
@app.route('/api/cities', methods=['GET'])
def get_locations():
locations_query = db.session.query(venue.venueplace.distinct()).all()
locations = [{"id": i + 1, "name": location[0]} for i, location in enumerate(locations_query)]
return jsonify(locations)
@app.route('/api/search', methods=['POST'])
def search_tickets():
data = request.get_json() # Get the JSON data from the request
genre = data.get('genre')
city = data.get('city')
date = data.get('date')
# Build the query to fetch the shows based on the search criteria
direct_match_query = db.session.query(venueshow).join(show).join(venue).filter(
show.showtags == genre, venue.venueplace == city, venueshow.showdate == date).all()
# If no direct match, try searching individually
if not direct_match_query:
date_match_query = db.session.query(venueshow).join(show).join(venue).filter(
venueshow.showdate == date).all()
genre_match_query = db.session.query(venueshow).join(show).join(venue).filter(
show.showtags == genre).all()
location_match_query = db.session.query(venueshow).join(show).join(venue).filter(
venue.venueplace == city).all()
search_query = date_match_query + genre_match_query + location_match_query
else:
search_query = direct_match_query
if not search_query and not direct_match_query:
search_query=db.session.query(venueshow).join(show).join(venue).all()
# Convert search results to a list of dictionaries
search_results = []
for result in search_query:
search_results.append({
"venueid": result.venueid,
"showid": result.showid,
"venuename": result.venue.venuename,
"showname": result.show.showname,
"venuelocation": result.venue.venuelocation,
"showdate": result.showdate,
"timing": result.timing,
"price": result.price,
"seats": result.venue.venuecapacity,
"bookedseats": result.bookedseats,
})
print(search_results)
return jsonify(search_results), 200
@app.route('/api/shows/<int:venueid>/<int:showid>', methods=['GET'])
def get_show_info(venueid, showid):
print(venueid, showid)
try:
showd = venueshow.query.filter_by(venueid=venueid, showid=showid).first()
print(showd)
if showd:
show_data = {
'showid': showd.showid,
'venueid': showd.venueid,
'seatprice': showd.price,
'bookedseats': showd.bookedseats,
}
print(show_data)
return jsonify(show_data)
else:
return jsonify({'message': 'Show not found'}), 404
except Exception as e:
return jsonify({'message': 'An error occurred', 'error': str(e)}), 500
@app.route('/api/bookings', methods=['POST'])
def add_booking():
try:
data = request.json
print(data)
new_booking = booking(
userid=data['userid'],
showid=data['showid'],
venueid=data['venueid'],
seats=data['seatsbooked']
)
db.session.add(new_booking)
# Update bookedseats in venueshow record
show_to_update = venueshow.query.filter_by(venueid=data['venueid'], showid=data['showid']).first()
show_to_update.bookedseats += data['seatsbooked']
db.session.commit()
return jsonify({'message': 'Booking added successfully'})
except Exception as e:
db.session.rollback()
return jsonify({'message': 'An error occurred', 'error': str(e)}), 500
@app.route('/api/bookings/<int:user_id>', methods=['GET'])
def get_user_bookings(user_id):
try:
#print(user_id)
user_bookings = booking.query.filter_by(userid=user_id).all()
#print(user_bookings)
bookings_data = []
for user_booking in user_bookings:
show_info = show.query.get(user_booking.showid)
#print(show_info)
venue_info = venue.query.get(user_booking.venueid)
#print(venue_info)
booking_data = {
'bookingId': user_booking.bookingid,
'showName': show_info.showname,
'venueName': venue_info.venuename,
'venueLocation': venue_info.venuelocation,
'seatsBooked': user_booking.seats
}
bookings_data.append(booking_data)
#print(booking_data)
return jsonify(bookings_data)
except Exception as e:
return jsonify({'message': 'An error occurred', 'error': str(e)}), 500
@app.route('/api/<int:booking_id>/<int:rating>', methods=['POST'])
def add_rating(booking_id,rating):
print(booking_id)
print(rating)
try:
booking_record = booking.query.get(booking_id)
print(booking_record)
booking_record.rating = rating
# Update the rating of the show
show_record = show.query.get(booking_record.showid)
show_record.showrating = (show_record.showrating + rating) / 2
db.session.commit()
return jsonify({'message': 'Rating added successfully'})
except Exception as e:
return jsonify({'message': 'An error occurred', 'error': str(e)}), 500
@app.route('/api/cancel/<int:booking_id>', methods=['DELETE'])
def cancel_booking(booking_id):
print(booking_id)
bookings = booking.query.get(booking_id)
if bookings:
# Get the corresponding show record
vid=bookings.venueid
sid=bookings.showid
# Update seat availability for the canceled booking
ven=venueshow.query.filter_by(venueid=vid, showid=sid).first()
ven.bookedseats-=bookings.seats
db.session.delete(bookings)
db.session.commit()
return jsonify({'message': 'Booking cancelled successfully'})
else:
return jsonify({'message': 'Booking not found'}), 404
@app.route('/api/download/<int:user_id>',methods=['GET'])
def downloadreport(user_id):
monthly_job.delay(user_id)
    return jsonify({'message': 'Mail sent'})
# An example of a protected route that requires authentication
@app.route('/api/protected', methods=['GET'])
@jwt_required()
def protected_route():
current_user_id = get_jwt_identity()
current_user = user.query.get(current_user_id)
return jsonify({"message": f"Hello {current_user.username}, you are authenticated!"}), 200
app.config['celery_broker'] = 'redis://localhost:6379/0'
app.config['backend'] = 'redis://localhost:6379/1'
celeryapp = Celery(app.name, broker=app.config['celery_broker'])
celeryapp.conf.update(app.config)
celeryapp.conf.beat_schedule = {
'daily-job':{
'task': 'app.daily_job',
'schedule': 86400, # Run every 24hr 86400
},
'monthly-job': {
'task': 'app.monthly_job',
'schedule': 2592000, # Run every 30 days 2592000 (in seconds)
},
'monthly-job2': {
'task': 'app.admin_monthly_job',
'schedule': 2592000, # Run every 30 days 2592000 (in seconds)
},
}
celeryapp.conf.timezone = 'UTC'
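# Illustrative launch commands (assuming this module is importable as ``app``):
#   celery -A app.celeryapp worker --loglevel=info
#   celery -A app.celeryapp beat --loglevel=info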
@celeryapp.task
def daily_job():
with app.app_context():
twenty_four_hours_ago = datetime.now() - timedelta(hours=24)
users_to_notify = db.session.query(statushistory).filter(statushistory.lastlogin <= twenty_four_hours_ago).all()
for user in users_to_notify:
send_email(user.userid)
print("Send to user successfully")
return len(users_to_notify)
@celeryapp.task
def monthly_job(user_id):
with app.app_context():
# Retrieve user information from the database
users = user.query.get(user_id)
if users:
user_bookings = get_bookings(users.userid)
# Generate the report content
report = generate_report(user_bookings)
# Send email to user
send_report_email(users.useremail, report)
print(f"Report email sent to user: {users.useremail}")
else:
print(f"User with ID {user_id} not found")
def send_email(userid):
# Email configuration
inactiveuser=db.session.query(user).filter_by(userid=userid).first()
print(inactiveuser)
smtp_server = "smtp.gmail.com"
smtp_port = 25
smtp_username = "vyshakhgnair.cvr@gmail.com"
smtp_password = "nesaigghheyaemau"
sender_email = "vyshakhgnair.cvr@gmail.com"
recipient_email = inactiveuser.useremail
# Create the email content
subject = "Daily Update"
body = f"Hello {inactiveuser.username},\n\nWe hope this message finds you well. 🎬\n\nWe noticed that you recently explored our movie booking website but haven't finalized your movie plans yet. Don't miss out on the excitement and entertainment that await you!\n\n� Catch the latest blockbusters on the big screen.\n\n🎉 Enjoy a memorable movie night with your loved ones.\n\n🎫 Reserve your seats hassle-free from the comfort of your home.\n\nYour movie experience is just a few clicks away. Head back to our website and grab the best seats for your preferred showtime. Whether you're into action-packed adventures, heartwarming dramas, or side-splitting comedies, we have something special in store for you."
# Create the email message
msg = MIMEMultipart()
msg['From'] = sender_email
msg['To'] = recipient_email
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
# Connect to the SMTP server and send the email
try:
server = smtplib.SMTP(smtp_server, smtp_port)
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(sender_email, recipient_email, msg.as_string())
server.quit()
print("Email sent successfully")
except Exception as e:
print("Error sending email:", e)
def get_bookings(user_id):
try:
user_bookings = booking.query.filter_by(userid=user_id).all()
bookings_data = []
for user_booking in user_bookings:
show_info = show.query.get(user_booking.showid)
venue_info = venue.query.get(user_booking.venueid)
booking_data = {
'bookingId': user_booking.bookingid,
'showName': show_info.showname,
'venueName': venue_info.venuename,
'venueLocation': venue_info.venuelocation,
'seatsBooked': user_booking.seats
}
bookings_data.append(booking_data)
return bookings_data
except Exception as e:
print(e) # Print the error for debugging purposes
return []
def generate_report(bookings_data):
report = "Here is your monthly movie booking report:\n\n"
for booking_data in bookings_data:
report += f"Booking ID: {booking_data['bookingId']}\n"
report += f"Movie Name: {booking_data['showName']}\n"
report += f"Venue: {booking_data['venueName']} ({booking_data['venueLocation']})\n"
report += f"Seats Booked: {booking_data['seatsBooked']}\n\n"
report += "Don't miss out on more exciting movies this month!\n\nBest regards,\nThe Movie Booking Team"
return report
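# Illustrative output of generate_report (the values below are made up):
#   Here is your monthly movie booking report:
#
#   Booking ID: 12
#   Movie Name: Inception
#   Venue: Galaxy Cinema (Downtown)
#   Seats Booked: 2
#
#   Don't miss out on more exciting movies this month! ...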
def send_report_email(useremail, report):
# Email configuration
smtp_server = "smtp.gmail.com"
    smtp_port = 587  # Gmail's STARTTLS port; port 25 is typically blocked
smtp_username = "vyshakhgnair.cvr@gmail.com"
smtp_password = "nesaigghheyaemau"
sender_email = "vyshakhgnair.cvr@gmail.com"
recipient_email = useremail
# Create the email content
subject = "Monthly Movie Booking Report"
# Create the email message
msg = MIMEMultipart()
msg['From'] = sender_email
msg['To'] = recipient_email
msg['Subject'] = subject
msg.attach(MIMEText(report, 'plain'))
# Connect to the SMTP server and send the email
try:
server = smtplib.SMTP(smtp_server, smtp_port)
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(sender_email, recipient_email, msg.as_string())
server.quit()
print("Report email sent successfully")
except Exception as e:
print("Error sending report email:", e)
@app.route('/api/admin-monthly/<int:venueid>/<int:userid>',methods=['GET'])
def admin_csv(venueid, userid):
    admin_monthly_job.delay(venueid, userid)
    return jsonify({'message': 'Mail sent'})
@celeryapp.task
def admin_monthly_job(venueid,userid):
with app.app_context():
        # Retrieve information on each show running in the venue and build a CSV file for it
venue_shows = venueshow.query.filter_by(venueid=venueid).all()
print(venue_shows)
venue_ven = venue.query.get(venueid)
print(venue_ven)
venue_name = venue_ven.venuename
print(venue_name)
venue_location = venue_ven.venuelocation
print(venue_location)
venue_shows_data = []
for venue_show in venue_shows:
show_info = show.query.get(venue_show.showid)
print(show_info)
venue_shows_data.append({
'showName': show_info.showname,
'showDate': venue_show.showdate,
'showTime': venue_show.timing,
'seatsBooked': venue_show.bookedseats
})
print(venue_shows_data)
# Create a Pandas DataFrame from the list of dictionaries
df = pd.DataFrame(venue_shows_data)
print(df)
# Create a CSV file from the DataFrame
csv_file_name = f"{venue_name}_{venue_location}.csv"
df.to_csv(csv_file_name, index=False)
print(csv_file_name)
        # Email the CSV file to the currently logged-in user
        users = user.query.get(userid)
        send_csv_email(users.useremail, csv_file_name, venue_name, venue_location)
        print("CSV email sent successfully")
        # return a plain dict: a Flask Response is not serializable as a Celery task result
        return {'message': 'CSV email sent successfully'}
def send_csv_email(usermail,csv_file_name, venue_name, venue_location):
# Email configuration
smtp_server = "smtp.gmail.com"
    smtp_port = 587  # Gmail's STARTTLS port; port 25 is typically blocked
smtp_username = "vyshakhgnair.cvr@gmail.com"
smtp_password = "nesaigghheyaemau"
sender_email = "vyshakhgnair.cvr@gmail.com"
recipient_email = usermail
# Create the email content
subject = f"Monthly Movie Booking Report for {venue_name} ({venue_location})"
# Create the email message
msg = MIMEMultipart()
msg['From'] = sender_email
msg['To'] = recipient_email
msg['Subject'] = subject
# Attach the CSV file
with open(csv_file_name, 'rb') as f:
part = MIMEApplication(f.read(), Name=basename(csv_file_name))
part['Content-Disposition'] = f'attachment; filename="{basename(csv_file_name)}"'
msg.attach(part)
# Connect to the SMTP server and send the email
try:
server = smtplib.SMTP(smtp_server, smtp_port)
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(sender_email, recipient_email, msg.as_string())
server.quit()
print("CSV email sent successfully")
except Exception as e:
print("Error sending CSV email:", e)
if __name__ == '__main__':
    # run only the web app here; the Celery worker and beat run as separate processes
    app.run(debug=True)
| vyshakhgnair/Movie-Booker | app-copy.py | app-copy.py | py | 32,091 | python | en | code | 0 | github-code | 50 |
16528695422 |
abuelita = 500
juan = 200 + abuelita
jose = 300 + abuelita
Total = juan + jose
print("\n\n\n 1 - Juan and José are brothers. Juan has $200 and José has $300,"
      " and each also received $500 from their grandmother. How much money"
      " do the two of them have together? \n")
print("The total is", Total)
| Alejandro32/ejecicios-python | Problemas/Problemas Basicos/pro1.py | pro1.py | py | 296 | python | es | code | 0 | github-code | 50 |
10756845927 | a = "вЕнЕрА в доМЕ рЫб"
b = "СатУрн В ВОДОлее"
c = "Когда-ТО БуДЕт ЧетВерг"
print(a.title())
print(b.title())
print(c.title())
print("Впереди" + " большие " + "неожиданности")
print(7 == 5, 42 > 13, 144**(1/2))
print(2 * 7 - 10 > 2**4)
# Remainder of division (modulo)
z = 42
print("Is the number z even?", z % 2 == 0)
y = "2000"
is_leap = (int(y) % 4) == 0
print("Is the year " + y + " a leap year?", is_leap)
# tuples are immutable
t = (2, 12, 85, 7)
print("Тапл:", t)
print("Второй элемент тапла:", t[1])
L = [3, 4, 15, 16, 23, 42]
print("Список:", L)
print("Третий элемент списка:", L[2])
t1 = (2, 12, 85, 7)
t2 = (32, 12, 6)
t = t1 + t2
print(t)
L1 = [2, 12]
L1.append(85)
L1 = L1 + [7]
L2 = [32, 12, 6]
L = L1 + L2
print(L)
planets = ["Земля", "Венера", "Меркурий"]
print(planets)
planets.append("Марс")
planets.append("Сатурн")
print(planets)
print(planets + ["Уран", "Нептун"])
planets.pop(2)
print(planets)
locations = {}  # an empty dictionary
locations["Valentin"] = "Moscow"
locations["Andrei"] = "Saint Petersburg"
locations["Svetlana"] = "Kazan"
print(locations)
print("Ivan" in locations)
print("Valentin" in locations)
a = 5
if a < 10:
    print("I, the great Kaa, see that you entered a single-digit number")
name = "Konstantin"
if len(name) > 5:
    print("What a long name, " + name + "!")
else:
    print("Here comes " + name)
month = 7
# month names, 0-indexed (so month == 1 is February)
all_months = ["January", "February", "March", "April", "May", "June", "July",
              "August", "September", "October", "November", "December"]
if month == 1:
    num_days = 28
elif month > 6:
    if month % 2:
        num_days = 31
    else:
        num_days = 30
else:
    if month % 2:
        num_days = 30
    else:
        num_days = 31
print("The month of " + all_months[month] + " has " + str(num_days) + " days")
course_by = "skillfactory"
i = 0
while i < len(course_by):
print("* " + course_by[i] + " *")
i = i + 1
course_by = "skillfactory"
i = 0
while i < len(course_by):
print("=" + course_by[i].center(30) + "=")
i = i + 1
import random
course_by = "skillfactory"
i = 0
while i < 10:
index = random.randrange(0, len(course_by))
print(course_by[index])
i = i + 1
times = ["утром", "днём", "вечером", "ночью", "после обеда", "перед сном"]
advices = ["ожидайте", "предостерегайтесь", "будьте открыты для"]
promises = ["гостей из забытого прошлого", "встреч со старыми знакомыми",
"неожиданного праздника", "приятных перемен"]
generated_prophecies = []
i = 0
while i < 3:
index_t = random.randrange(0, len(times))
index_a = random.randrange(0, len(advices))
index_p = random.randrange(0, len(promises))
print(times[index_t] + " " + advices[index_a] + " " + promises[index_p])
i = i + 1
i = 0
while i < 3:
t = random.choice(times)
a = random.choice(advices)
p = random.choice(promises)
print(t + " " + a + " " + p)
i = i + 1 | would-you-kindly/PythonForWeb | 1/horoscope.py | horoscope.py | py | 3,449 | python | ru | code | 0 | github-code | 50 |
25478403431 |
from biztest.util.easymock.easymock import Easymock
class ScbMock(Easymock):
    # user-center mock
def update_fk_userinfo(self, id_card_encrypt, full_name_encrypt, no_encrypt):
api = "/tha/individual/getUserInfoByType"
mode = {
"msg": "success",
"data": {
"individual": {
"id_card": "11****052830*",
"id_card_encrypt": id_card_encrypt,
"full_name": "น*******************ง",
"full_name_encrypt": full_name_encrypt,
"email": "h*******f@yahoo.com",
"email_encrypt": "enc_04_4106517378224365568_767",
"phone": "0********0",
"phone_encrypt": "enc_01_3729739132709636096_329",
"birthday": "09/02/1990",
"gender": "female",
"address": "***/*** หมู่ที่ * ต.ท่าธง อ.บางบัวทอง จ.นนทบุรี",
"address_encrypt": "enc_06_3058548273310081024_351"
},
"bankCard": [
{
"uuid": "6621061000000005302",
"bank_code": "T00007", # cimb 需要是022这个银行才能成功
"name_encrypt": full_name_encrypt,
"no": "5555****55",
"no_encrypt": no_encrypt,
"order": 1,
"bank_name": "KIA TN AKIN BANK PUBLIC COMPANY LIMITED",
"is_effectived": "Y",
"bank_name_thailand": "ธนาคารเกียรตินาคิน จ ากัด (มหาชน)"
}
]
}
}
self.update(api, mode)
    # token-retrieval mock
def update_gettoken(self, status):
api = "/partners/v1/oauth/token"
if status == "success":
mode = {
"status ": {
"code": 1000,
"description": "Success"
},
"data": {
"accessToken": "16659ae7-84c6-4125-8442-f6659c5ce02c",
"tokenType": "Bearer",
"expiresIn": 3600,
"expiresAt": 1530544107
}
}
else:
mode = {
"status ": {
"code": 9700,
"description": "Generic Business Error"
}
}
self.update(api, mode)
    # verify the payment info and create the order
def update_withdraw_initiate(self, status):
api = "/partners/v2/payment/transfer/credit/initiate"
if status == "success":
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": [{
"tokenizerId": "D014917201627108",
"transactionDateTime": "2021-06-09T14:49:38.754+07:00",
"customerRefNum": "123456789012345678901234567890"
}]
}
elif status == "500":
mode = {"_res": {"status": 500}}
else:
mode = {
"status": {
"code": 401,
"description": "FAILED"
},
"data": [{
"tokenizerId": "D014917201627108",
"transactionDateTime": "2021-06-09T14:49:38.754+07:00",
"customerRefNum": "123456789012345678901234567890"
}]
}
self.update(api, mode)
    # if Confirm returns an error, the order status can be queried via this endpoint
def update_withdraw_inquiry(self, status):
api = "/partners/v1/payment/transfer/inquiry"
if status == "success":
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": [{
"paymentInformation": [{
"originalPaymentInformationAndStatus": [{
"transactionInformation": {
"transactionStatus": "ACCC", # ACCC是成功,PDNG是处理中需要调用确认接口
"errorCode": "000",
"errorDescription": "Success",
"businessTransactionReference": "1234567890"
}
}]
}]
}]
}
elif status == "process":
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": [{
"paymentInformation": [{
"originalPaymentInformationAndStatus": [{
"transactionInformation": {
"transactionStatus": "PDNG", # ACCC是成功,PDNG是处理中需要调用确认接口
"errorCode": "000",
"errorDescription": "Success",
"businessTransactionReference": "1234567890"
}
}]
}]
}]
}
elif status == "fail":
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": [{
"paymentInformation": [{
"originalPaymentInformationAndStatus": [{
"transactionInformation": {
"transactionStatus": "RJCT", # ACCC是成功,PDNG是处理中需要调用确认接口
"errorCode": "001",
"errorDescription": "RJCT fail",
"businessTransactionReference": "1234567890"
}
}]
}]
}]
}
elif status == "500":
mode = {"_res": {"status": 500}}
else:
pass
self.update(api, mode)
    # confirm the disbursement result; a success response means the disbursement succeeded
def update_withdraw_confirm(self, status):
api = "/partners/v2/payment/transfer/confirm"
if status == "success":
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": [{
"tokenizerId": "D014917201627108",
"transactionDateTime": "2021-09-17T14:49:42.489+07:00"
}]
}
else:
pass
self.update(api, mode)
def update_scb_sdk_token_generate_success(self):
api = "/v1/oauth/token"
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": {
"accessToken": "34362373-66e8-4db0-80e5-0755b67e51f9",
"tokenType": "Bearer",
"expiresIn": 1800,
"expiresAt": 1550133185,
"refreshToken": "9e80be84-5eb7-4e8c-a885-a36ff3eb6684",
"refreshExpiresIn": 3600,
"refreshExpiresAt": 1550134985
}
}
self.update(api, mode)
def update_scb_sdk_token_generate_failed(self):
api = "/v1/oauth/token"
mode = {
"status": {
"code": 9300,
"description": "Invalid authorization method for current credentials"
},
"data": None
}
self.update(api, mode)
def update_scb_sdk_generate_success(self):
api = "/v3/deeplink/transactions"
mode = {
"status": {
"code": 1000,
"description": "Deeplink successfully created"
},
"data": {
"transactionId": "2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9",
"deeplinkUrl": "scbeasysim://purchase/2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9",
"userRefId": "l765674b1b4d664bdfae7f260db219e2f1"
}
}
self.update(api, mode)
def update_scb_sdk_generate_failed(self):
api = "/v3/deeplink/transactions"
mode = {
"status": {
"code": 4101,
"description": "The feature is not supported"
},
"data": None
}
self.update(api, mode)
def update_scb_sdk_generate_service_error(self):
api = "/v3/deeplink/transactions"
mode = {
"status": {
"code": 500,
"description": "service error"
}
}
self.update(api, mode)
def update_scb_sdk_query_service_error(self):
"""statusCode = "500" => service error"""
transaction_id = "2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9"
api = "/v2/transactions/%s" % transaction_id
mode = {
"status": {
"code": 500,
"description": "service error"
}
}
self.update(api, mode)
def update_scb_sdk_query_success(self):
"""statusCode = "1" => paid"""
transaction_id = "2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9"
api = "/v2/transactions/%s" % transaction_id
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": {
"partnerId": "l7c73ac7f144fb4d22ac15371d9ba34666",
"transactionMethod": "BP",
"updatedTimestamp": "2021-01-15T09:05:58+07:00",
"fee": 0,
"statusCode": 1,
"transactionSubType": [
"BP"
],
"userRefId": "l7c73ac7f144fb4d22ac15371d9ba34666",
"sessionValidityPeriod": 300,
"transactionId": transaction_id,
"transactionType": "PURCHASE",
"billPayment": {
"receiverBankCode": "014",
"senderAccountValue": "8843410001",
"receiverAccountType": "BANKAC",
"receiverName": "biller name with len 25ha",
"senderProxyValue": "8843410001",
"receiverAccountValue": "0987654321",
"ref2": "161035588610220",
"senderBankCode": "014",
"ref1": "161035588610220",
"receiverProxyValue": "137613415414240",
"paymentAmount": 39,
"accountFrom": "8843410001",
"senderAccountType": "BANKAC",
"accountTo": "137613415414240",
"senderName": "Jasmine Golubeva",
"receiverProxyType": "BILLERID",
"ref3": "161035588610220",
"countryCode": "EN",
"senderProxyType": "ACCOUNT",
"currency": "764"
},
"partnerName": "AforT No.1",
"errorMessage": None,
"merchantMetaData": {
"merchantInfo": {
"name": "KNKNT"
},
"deeplinkUrl": "scbeasysim://purchase/f1a65e5b-1579-4777-aa72-a1483ccad9ce",
"callbackUrl": "http://cn.bing.com"
},
"paidAmount": 39,
"accountFrom": "8843410001",
"createdTimestamp": "2021-01-15T09:05:14+07:00"
}
}
self.update(api, mode)
def update_scb_sdk_query_pending(self):
"""
statusCode = 0 => pending
"""
transaction_id = "2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9"
api = "/v2/transactions/%s" % transaction_id
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": {
"partnerId": "l7c73ac7f144fb4d22ac15371d9ba34666",
"transactionMethod": None,
"updatedTimestamp": "2021-01-17T09:34:30+07:00",
"statusCode": 0,
"transactionSubType": [
"BP"
],
"userRefId": "l7c73ac7f144fb4d22ac15371d9ba34666",
"transactionId": transaction_id,
"transactionType": "PURCHASE",
"sessionValidityPeriod": 300,
"billPayment": {
"accountTo": "137613415414240",
"ref2": " 161035588610235",
"ref1": "161035588610235",
"paymentAmount": 39,
"ref3": "161035588610235",
"accountFrom": None
},
"partnerName": "AforT No.1",
"errorMessage": None,
"merchantMetaData": {
"merchantInfo": {
"name": "KNKNT"
},
"deeplinkUrl": "scbeasysim://purchase/936bedd9-50c6-4102-980f-c7acf7e94e51",
"callbackUrl": "http://cn.bing.com"
},
"paidAmount": 0,
"createdTimestamp": "2021-01-17T09:34:30+07:00",
"accountFrom": None
}
}
self.update(api, mode)
def update_scb_sdk_query_expired(self):
"""statusCode = 5 => expired"""
transaction_id = "2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9"
api = "/v2/transactions/%s" % transaction_id
mode = {
"status": {
"code": 1000,
"description": "Success"
},
"data": {
"partnerId": "l765674b1b4d664bdfae7f260db219e2f1",
"transactionMethod": None,
"updatedTimestamp": "2021-01-13T15:33:11+07:00",
"creditCardFullAmount": {
"orderReference": "000000000000002",
"terminalId": "589432760047346",
"paymentAmount": 700,
"merchantId": "915815844712810"
},
"statusCode": 5,
"transactionSubType": [
"BP"
],
"userRefId": "l765674b1b4d664bdfae7f260db219e2f1",
"sessionValidityPeriod": 300,
"transactionId": transaction_id,
"transactionType": "PURCHASE",
"billPayment": {
"accountTo": "851737440782289",
"ref2": "000000000000002",
"ref1": "000000000000002",
"paymentAmount": 700,
"ref3": "000000000000002",
"accountFrom": None
},
"partnerName": "My Test App",
"errorMessage": None,
"merchantMetaData": {
"merchantInfo": {
"name": "KN TEST ENV"
},
"deeplinkUrl": "scbeasysim://purchase/2143a72e-46b4-44a6-ad5e-1e2e8a6cc1f9",
"callbackUrl": "http://cn.bing.com"
},
"paidAmount": 0,
"accountFrom": None,
"createdTimestamp": "2021-01-13T15:27:09+07:00"
}
}
self.update(api, mode)
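# A usage sketch (assumptions on our part: Easymock here takes no constructor
# arguments and update() registers the stub immediately -- both taken on faith
# from the import above, not verified against the original framework):
#
#   mock = ScbMock()
#   mock.update_gettoken("success")           # stub the OAuth token endpoint
#   mock.update_withdraw_initiate("success")  # stub order creation
#   mock.update_withdraw_inquiry("process")   # force the PDNG branch
#   mock.update_withdraw_confirm("success")   # stub disbursement confirmation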
if __name__ == "__main__":
pass
| xiujingyuan/framework-test | biztest/util/easymock/global_payment/global_payment_scb_mock.py | global_payment_scb_mock.py | py | 15,904 | python | en | code | 0 | github-code | 50 |
13943371507 | import random
from operator import itemgetter
from copy import deepcopy
# Defining a TrieNode type to use to build up the Trie
# This version is slightly modified to construct the Trie for text not multiple patterns
class TrieNode:
# id label counter for inserting nodes
id_count = 0
def __init__(self, character, depth, position=None):
self.id = TrieNode.id_count
self.char = character # the character stored in this node
self.pos = position # position of this node in text
self.length = None
# This is used only for leaf nodes to assign starting pos of this substring
self.label = None
# Added for the longest repeat problem
self.depth = depth
# dictionary to store children (other TrieNodes) connected to this one
self.children = {}
# Increment the label number, the 'id'
TrieNode.id_count += 1
class Trie(object):
def __init__(self):
# Create a root TrieNode with an empty char and ID 0, at level 0
self.root = TrieNode('', 0)
# Construct the full Trie from text
def construct(self, text):
n = len(text)
for i in range(0, n):
current_node = self.root
for j in range(i, n):
current_symbol = text[j]
if current_symbol in current_node.children:
current_node = current_node.children[current_symbol]
# If the character is not found, create a new node in the trie
else:
# For the longest repeat challenge, add the level information
# to each new node added
depth = current_node.depth + 1
new_node = TrieNode(current_symbol, depth, j)
current_node.children[current_symbol] = new_node
current_node = new_node
            # mark the leaf with the starting position of this suffix
            if len(current_node.children) == 0:
                current_node.label = i
def print_tree_nodes(self, node):
for n in node.children:
print('Node pos: ' + str(node.children[n].pos) + ' and ' \
+ str(node.children[n].length) + ' depth is ' + str(node.children[n].depth))
self.print_tree_nodes(node.children[n])
def print_tree_symbols(self, node):
for n in node.children:
print('Node id: ' + str(node.children[n].id) + ' node s: ' + str(node.children[n].char))
self.print_tree_symbols(node.children[n])
def print_tree_strings(self, node, text):
for n in node.children:
i = node.children[n].pos
j = node.children[n].pos + node.children[n].length
print(text[i:j])
self.print_tree_strings(node.children[n], text)
def read_input(filename):
    text = ""
    try:
        with open(filename, "r") as input_file:
            text = input_file.readline().rstrip('\n')
    except IOError as e:
        print(e)
    return text
def modified_trie_construction(text):
trie = Trie()
trie.construct(text)
return trie
def modified_suffix_tree_construction(node):
for n in node.children:
path = []
current_node = node.children[n]
path.append(current_node)
non_branching = True
while non_branching:
try:
if len(current_node.children) == 0:
# we hit a leaf, so substitute all nodes along the path by
# a single node
path_len = len(path)
path[0].length = path_len
path[0].children.clear()
break
            elif len(current_node.children) == 1:
                only_child = list(current_node.children.items())[0]  # the single (symbol, node) pair
                current_node = current_node.children[only_child[0]]
path.append(current_node) # add new node to our path
else:
path_len = len(path)
path[0].length = path_len
modified_suffix_tree_construction(current_node)
temp = deepcopy(current_node)
# when we clear, python garbage collection delete the nodes
# from memory, therefore we make a temp copy of the node
path[0].children.clear()
for child in temp.children:
temp.children[child].depth = path[0].depth + 1
path[0].children[child] = temp.children[child]
break
except KeyError as e:
print('Something wrong', e)
###################################################
def longest_repeat(node, text, repeated_patterns, current_pattern):
    # Every internal node of the suffix tree spells a repeated substring: collect
    # the accumulated path label whenever a child is a leaf. The pattern must be
    # reset to the prefix reaching this node before *each* child, so a sibling
    # never inherits the previous child's edge label.
    prefix = '' if node.id == 0 else current_pattern
    for n in node.children:
        current_pattern = prefix
        child = node.children[n]
        if len(child.children) == 0:
            repeated_patterns.add(current_pattern)
            continue
        start = child.pos
        end = child.pos + child.length
        repeated_patterns = longest_repeat(child, text, repeated_patterns,
                                           current_pattern + text[start:end])
    return repeated_patterns
def start():
text = read_input("dataset.txt")
text = text + '$'
trie = modified_trie_construction(text)
modified_suffix_tree_construction(trie.root) # convert trie to tree
#trie.print_tree_nodes(trie.root)
#trie.print_tree_symbols(trie.root)
repeated_patterns = set()
current_pattern = ''
result = longest_repeat(trie.root, text, repeated_patterns, current_pattern)
print(max(result, key=len))
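# A minimal sanity check (a sketch; "panamabananas" is our example string, not
# part of the original dataset -- its longest repeat is "ana"):
def demo_longest_repeat():
    demo_text = "panamabananas" + "$"
    demo_trie = modified_trie_construction(demo_text)
    modified_suffix_tree_construction(demo_trie.root)
    repeats = longest_repeat(demo_trie.root, demo_text, set(), '')
    print(max(repeats, key=len))  # expected: "ana"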
if __name__ == '__main__':
start()
| kaust-cs249-2020/MOHSHAMMASI-CS249-BIOINFORMATICS | Chapter-9/LongestRepeat_section5.py | LongestRepeat_section5.py | py | 5,905 | python | en | code | 0 | github-code | 50 |
18221299914 | class Node:
def __init__(self, value):
self.value = value
self.next = None
# ***********************************************************************************************************************
class SLL:
def __init__(self):
self.head = None
def append(self,value):
if self.head == None:
newnode = Node(value)
self.head = newnode
else:
runner = self.head
while runner.next != None:
runner = runner.next
newnode = Node(value)
runner.next = newnode
return self
    def addFront(self, value):
        # link the new node in front of the current head, then move head to it
        newnode = Node(value)
        newnode.next = self.head
        self.head = newnode
        return self
def display(self):
runner = self.head
output = ""
while (runner != None):
output += f"{runner.value}-->"
runner = runner.next
print (output)
return self
def removefront(self):
if self.head==None:
return self
else:
self.head=self.head.next
return self
    def removeback(self):
        if self.head == None:
            return self
        if self.head.next == None:  # single node: the back is the head
            self.head = None
            return self
        runner = self.head
        while runner.next.next != None:  # stop at the node before the last
            runner = runner.next
        runner.next = None
        return self
    def movemintofront(self):
        if self.head == None or self.head.next == None:
            return self
        minval = self.head.value
        nodeBeforeMin = None
        minNode = None
        rnr = self.head
        while rnr.next:
            if rnr.next.value < minval:
                minval = rnr.next.value
                nodeBeforeMin = rnr
                minNode = rnr.next
            rnr = rnr.next
        if minNode == None:  # the head already holds the minimum
            return self
        nodeBeforeMin.next = minNode.next
        minNode.next = self.head
        self.head = minNode
        return self
# ***********************************************************************************************************************
class Queue:
def __init__(self):
self.front = None
self.back = None
def enqueue(self, value):
newnode = Node(value)
if self.front == None:
self.front = newnode
self.back = newnode
else:
self.back.next = newnode
self.back = self.back.next
return self
def dequeue(self):
if self.front == None:
return None
else:
frontToDequeue = self.front.value
self.front = self.front.next
# frontToDequeue.next = None
return frontToDequeue
    def peekFront(self):
        # the instance attribute self.front shadows any method named front,
        # so this accessor needs a distinct name
        if self.front != None:
            return self.front.value
        else:
            return None
def contains(self, valueToFind):
if self.front == None:
return False
else:
runner = self.front
while runner != None:
if runner.value == valueToFind:
print("true")
return True
else:
runner = runner.next
print("false")
return False
def isEmpty(self):
if self.front == None:
return True
else:
return False
def size(self):
count = 0
if self.front == None:
return count
else:
runner = self.front
while runner != None:
count += 1
runner = runner.next
print(count)
return count
    def putNodeIntoArray(self):
        # copy every value, front to back, into a plain list
        array = []
        runner = self.front
        while runner != None:
            array.append(runner.value)
            runner = runner.next
        return array
def display(self):
if self.front== None:
            print('this queue is empty')
else:
runner=self.front
output=""
while runner.next != None:
output+= f"{runner.value}-->"
runner=runner.next
output+= f"{runner.value}-->"
print(output)
return self
# ***********************************************************************************************************************
class Stack:
def __init__(self):
self.top = None
def push(self,val):
newnode= Node(val)
if self.top == None:
self.top = newnode
else:
newnode.next=self.top
self.top=newnode
return self
def pop (self):
if self.top==None:
return None
else:
popvalue=self.top.value
self.top=self.top.next
return popvalue
def display(self):
if self.top== None:
print('this stack is empty')
else:
runner=self.top
output=""
while runner.next != None:
output+= f"{runner.value}-->"
runner=runner.next
output+= f'{runner.value}-->'
print(output)
return self
    def size(self):
        count = 0
        runner = self.top
        while runner != None:
            count += 1
            runner = runner.next
        return count
# ***********************************************************************************************************************
def comparestack2(s1, s2):
    if s1.size() == s2.size():
        runner1 = s1.top
        runner2 = s2.top
        while runner1 != None:
            if runner1.value != runner2.value:  # compare values, not node objects
                print('same size but values are not equal')
                return False
            runner1 = runner1.next
            runner2 = runner2.next
        print('yes! they are equal')
        return True
    else:
        print('not the same size')
        return False
# def oueuePalimdrom(queue):
# runner1=queue.front
# if runner1 == None:
# return 'this queue is empty'
# else:
# while runner1.next != None:
# runner1=runner1.next
def palimdromes(array):
    for i in range(0, int(len(array)/2), 1):
        if array[i] != array[len(array)-1-i]:
            print('this is not a palindrome')
            return False
    # declare success only after every mirrored pair has matched
    print('Palindrome!!!')
    return True
def queuePalimdrom(queue):
array= queue.putNodeIntoArray()
a=palimdromes(array)
return a
# newQueue = Queue()
# queue2 = Queue()
# queue2.dequeue()
# newQueue.enqueue(1).enqueue(2).enqueue(3).display()
# # queuePalimdrom(newQueue)
# newStack=Stack()
# newStack.push(1).push(2).push(3).display()
def TwoStackIntoOneQueue(s1, s2):
    # walk both stacks with runner pointers, so s1 and s2 are preserved;
    # s1's values are enqueued first, then s2's
    q = Queue()
    runner1 = s1.top
    runner2 = s2.top
    while runner1 != None:
        q.enqueue(runner1.value)
        runner1 = runner1.next
    while runner2 != None:
        q.enqueue(runner2.value)
        runner2 = runner2.next
    return q
def TwoStackIntoOneQueue_2(s1, s2):
    # interleave by popping, which empties both input stacks
    q = Queue()
    runner1 = s1.top
    runner2 = s2.top
    while runner1 != None and runner2 != None:
        a = s1.pop()
        b = s2.pop()
        q.enqueue(a).enqueue(b)
        runner1 = runner1.next
        runner2 = runner2.next
    if runner1 != None:
        the_rest = s1
    elif runner2 != None:
        the_rest = s2
    else:
        return q  # both stacks drained evenly
    runner = the_rest.top
    while runner != None:
        a = the_rest.pop()
        q.enqueue(a)
        runner = runner.next
    return q
# Note: TwoStackIntoOneQueue_2() empties both input stacks because it pops from them.
# To keep the stacks intact, walk the nodes with runner pointers instead of popping,
# as in the sketch below.
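# A sketch of that non-destructive variant (the helper name is ours, not from
# the original file): interleave via runner pointers so s1 and s2 survive.
def interleaveStacksPreserving(s1, s2):
    q = Queue()
    runner1 = s1.top
    runner2 = s2.top
    while runner1 != None and runner2 != None:
        q.enqueue(runner1.value).enqueue(runner2.value)
        runner1 = runner1.next
        runner2 = runner2.next
    leftover = runner1 if runner1 != None else runner2
    while leftover != None:
        q.enqueue(leftover.value)
        leftover = leftover.next
    return q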
s1=Stack()
s2=Stack()
s1.push(5).push(4).push(8).push(9).display()
s2.push(1).push(3).push(8).display()
a=TwoStackIntoOneQueue_2(s1,s2)
a.display()
s1.display()
s2.display()
| Kenjilam92/algorithm | April/SLL-Queue-Stack.py | SLL-Queue-Stack.py | py | 8,268 | python | en | code | 0 | github-code | 50 |
75284656475 | # imports
import math
import cv2
import mediapipe as mp
import numpy as np
from keras.models import load_model
from keras.utils.image_utils import img_to_array
import pyaudio
import audioop
import tkinter as tk
# init the audio
audio = pyaudio.PyAudio()
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 1024
THRESHOLD = 1000
HIGH = "high"
LOW = "low"
CURRENT_EMOTION = "neutrual"
# defining functions
# Printing emotion to the screen
def print_emotion_to_console(emotion_label):
print("Detected emotion:", emotion_label)
# defining angles using math import
def get_angle(x1, y1, x2, y2):
return math.degrees(math.atan2(y2 - y1, x2 - x1))
def detect_head_turn(angle):
if angle < -5:
return "Right"
elif angle > 5:
return "Left"
else:
return "Center"
def print_head_turn_to_console(direction):
print("Head turned:", direction)
def get_voice_level():
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK)
    data = stream.read(CHUNK)
    stream.stop_stream()
    stream.close()  # release the device so repeated calls do not leak streams
    rms = audioop.rms(data, 2)  # RMS amplitude of the 16-bit samples
    return rms
# create blank canvas window
canvas = tk.Canvas(width=1000, height=1000)
canvas.pack()
# dictionary that maps emotion to image path
emotion_images = {
"Angry": "Character/angry.png",
"Excited": "Character/excited.png",
"Upset": "Character/sad.png",
"Happy": "Character/happy.png",
"Neutral": "Character/neutral.png",
"AngryLeft": "Character/angryLeft.png",
"ExcitedLeft": "Character/excitedLeft.png",
"NeutralLeft": "Character/neutralLeft.png",
"NeutralRight": "Character/neutralRight.png",
"UpsetLeft": "Character/sadLeft.png",
"HappyLeft": "Character/happyLeft.png",
"AngryRight": "Character/angryRight.png",
"ExcitedRight": "Character/excitedRight.png",
"UpsetRight": "Character/sadRight.png",
"HappyRight": "Character/happyRight.png",
"Neutral Loud": "Character/neutralLoud.png",
"Neutral Loud Left": "Character/neutralLoudLeft.png",
"Neutral Loud Right": "Character/neutralLoudRight.png",
}
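# A sketch of a table-driven alternative to the long if/elif chains in the main
# loop below (the helper name is ours; the returned strings match the keys above):
def pick_avatar(emotion_label, head_turn, voice_level):
    suffix = "" if head_turn == "Center" else head_turn  # "", "Left" or "Right"
    loud = voice_level > THRESHOLD
    if emotion_label == "Angry":
        return ("Angry" if loud else "Upset") + suffix
    if emotion_label == "Happy":
        return ("Excited" if loud else "Happy") + suffix
    if emotion_label == "Neutral" and loud:
        return "Neutral Loud" + ((" " + suffix) if suffix else "")
    return "Neutral" + suffix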
# load images into dictionary
loaded_images = {}
for emotion, image_path in emotion_images.items():
loaded_images[emotion] = tk.PhotoImage(file=image_path)
loaded_images[emotion] = loaded_images[emotion].subsample(2) # resize the image to half its size
mp_drawing = mp.solutions.drawing_utils
mp_face_detection = mp.solutions.face_detection
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
emotion_model = load_model('emotion_models.h5')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
with mp_face_detection.FaceDetection(
min_detection_confidence=0.5) as face_detection:
while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Could not read a frame from the camera")
            continue
image.flags.writeable = False
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = face_detection.process(image_rgb)
image.flags.writeable = True
image = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
# to retrieve the voice level
voice_level = get_voice_level()
if results.detections:
for detection in results.detections:
mp_drawing.draw_detection(image, detection)
bboxC = detection.location_data.relative_bounding_box
ih, iw, _ = image.shape
x, y, w, h = int(bboxC.xmin * iw), int(bboxC.ymin * ih), int(bboxC.width * iw), int(bboxC.height * ih)
face_region = image[y:y+h, x:x+w]
# Get the eye landmarks
landmarks = detection.location_data.relative_keypoints
left_eye = [landmarks[0].x * iw, landmarks[0].y * ih]
right_eye = [landmarks[1].x * iw, landmarks[1].y * ih]
# Calculate the angle between the eyes
angle = get_angle(left_eye[0], left_eye[1], right_eye[0], right_eye[1])
# Detect head turn direction
head_turn = detect_head_turn(angle)
print_head_turn_to_console(head_turn)
if face_region.shape[0] != 0 and face_region.shape[1] != 0:
face_region = cv2.resize(face_region, (48, 48))
face_region_gray = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
face_region_array = img_to_array(face_region_gray)
face_region_array = np.expand_dims(face_region_array, axis=0)
face_region_array /= 255
emotion_prediction = emotion_model.predict(face_region_array, verbose=0)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_label = emotions[emotion_label_arg]
font = cv2.FONT_HERSHEY_SIMPLEX
org = (x, y - 10)
fontScale = 0.75
color = (0, 255, 0)
thickness = 2
image = cv2.putText(image, emotion_label, org, font, fontScale, color, thickness, cv2.LINE_AA)
# print_emotion_to_console(emotion_label)
                    # The avatar depends on both the detected emotion and the head direction ---------------------------
if head_turn == 'Center':
if emotion_label == 'Angry' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'Angry'
elif emotion_label == 'Happy' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'Excited'
elif emotion_label == 'Angry' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'Upset'
elif emotion_label == 'Happy' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'Happy'
elif emotion_label == 'Neutral' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'Neutral Loud'
else:
CURRENT_EMOTION = 'Neutral'
if head_turn == 'Left':
if emotion_label == 'Angry' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'AngryLeft'
elif emotion_label == 'Happy' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'ExcitedLeft'
elif emotion_label == 'Angry' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'UpsetLeft'
elif emotion_label == 'Happy' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'HappyLeft'
elif emotion_label == 'Neutral' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'Neutral Loud Left'
else:
CURRENT_EMOTION = 'NeutralLeft'
if head_turn == 'Right':
if emotion_label == 'Angry' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'AngryRight'
elif emotion_label == 'Happy' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'ExcitedRight'
elif emotion_label == 'Angry' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'UpsetRight'
elif emotion_label == 'Happy' and voice_level < THRESHOLD:
CURRENT_EMOTION = 'HappyRight'
elif emotion_label == 'Neutral' and voice_level > THRESHOLD:
CURRENT_EMOTION = 'Neutral Loud Right'
else:
CURRENT_EMOTION = 'NeutralRight'
# --------------------------------------------------------------------------------------------------------------
# Display The Emotion to the console ---------------------------------------------------------------------------
print(CURRENT_EMOTION)
# --------------------------------------------------------------------------------------------------------------
# show image corresponding to CURRENT_EMOTION in canvas --------------------------------------------------------
if CURRENT_EMOTION in loaded_images:
canvas.delete("all")
canvas.create_image(250, 250, image=loaded_images[CURRENT_EMOTION])
canvas.update()
# --------------------------------------------------------------------------------------------------------------
cv2.imshow('MediaPipe Face Detection with Emotion Detection', image)
key = cv2.waitKey(1)
if key == ord('q'):
break
# --------------------------------------------------------------------------------------------------------------
cap.release()
audio.terminate()  # release the PyAudio instance opened at startup
cv2.destroyAllWindows() | T4JsaysHello/Facial-Emotion-Recognition | main.py | main.py | py | 8,831 | python | en | code | 0 | github-code | 50 |