index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
14,980
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/saver/__main__.py
|
import sys
import click
from .Saver import Saver
from ..utils.Constants import mongodb_url
from ..utils.CLITemplate import log, main
@main.command()
@click.option('-d', '--database', default=mongodb_url, type=str)
@click.argument('topic-name', type=str)
@click.argument('raw-data-path', type=click.Path(exists=True))
def save(database, topic_name, raw_data_path):
    """Save parsed data from *raw_data_path* under *topic_name* into the database."""
    # Build the saver around the target database and log whatever save() reports.
    log(Saver(database).save(topic_name, raw_data_path))
@main.command('run-saver')
@click.argument('db_url', type=str)
@click.argument('mq_url', type=str)
def run_saver(db_url, mq_url):
    """Continuously consume parsed-data topics from the MQ and persist them."""
    # consume_topics blocks on the message queue; log whatever it reports.
    log(Saver(db_url).consume_topics(mq_url))
if __name__ == '__main__':
    # CLI entry point: run the click group; any uncaught error is logged
    # and turned into a non-zero exit code.
    try:
        main(prog_name='saver', obj={})
    except Exception as error:
        log(f'ERROR: {error}')
        sys.exit(1)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,981
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/utils/drivers/db_drivers/MongoDriver.py
|
from pymongo import MongoClient, ASCENDING, UpdateOne
class MongoDriver:
    """Thin pymongo wrapper over a single collection holding users and their snapshots."""

    def __init__(self, url, db_name="bammi_data", table_name="users_and_snapshots"):
        self.client = MongoClient(url)
        self.db = self.client[db_name]
        # NOTE(review): despite the name, this attribute is a pymongo Collection.
        self.table_name = self.db[table_name]

    def insert_single_data_unit(self, data):
        """Insert one document into the collection."""
        self.table_name.insert_one(data)

    def insert_many_data_units(self, data_list):
        """Insert a list of documents into the collection."""
        self.table_name.insert_many(data_list)

    def upsert_data_unit(self, key, data):
        """Apply the *data* update to the document matching *key*, inserting if absent."""
        self.table_name.update_one(key, data, upsert=True)

    def create_index_for_id(self, key_name, *args, **kwargs):
        """Create an ascending index on *key_name*; extra args pass through to create_index."""
        self.table_name.create_index([(key_name, ASCENDING)], *args, **kwargs)

    def query_data(self, query=None, *args, **kwargs):
        """Return a cursor over documents matching *query* (None selects everything)."""
        return self.table_name.find(query, *args, **kwargs)

    def insert_snapshot_data_by_user(self, user_data, snapshot_data, field_name):
        """Merge one parsed snapshot field into the user's document.

        Three ordered operations cover the cases: user document missing,
        snapshot entry for this datetime present, snapshot entry missing.
        Order matters — bulk_write executes them sequentially (ordered=True
        is pymongo's default).
        """
        # Idea for array upsert taken from https://stackoverflow.com/questions/22664972/mongodb-upsert-on-array
        user_id = user_data['user_id']
        operations = [
            # If the document doesn't exist at all, insert it
            UpdateOne({'user_id': user_id},
                      {
                          '$setOnInsert': {
                              **{k: v for k, v in user_data.items()},
                              'snapshots': [{'datetime': snapshot_data['datetime']}]
                          }
                      },
                      upsert=True
                      ),
            # If the document exists, update it
            UpdateOne({'user_id': user_id,
                       'snapshots': {
                           '$elemMatch': {
                               'datetime': snapshot_data['datetime']
                           }
                       }
                       },
                      {
                          '$set':
                              {
                                  f'snapshots.$.{field_name}': snapshot_data[field_name]
                              }
                      }
                      ),
            # If an array element doesn't exist, add it. Won't conflict with the update a step before
            UpdateOne({'user_id': user_id, 'snapshots.datetime': snapshot_data['datetime']},
                      {
                          '$addToSet': {
                              'snapshots': {
                                  field_name: snapshot_data[field_name]
                              }
                          }
                      })
        ]
        self.table_name.bulk_write(operations)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,982
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/server/Server.py
|
from ..utils.APIServer import FlaskWrapper
from ..server.Receiver import Receiver, publish_to_message_queue
def run_server(host='127.0.0.1', port=8000, publish=publish_to_message_queue):
    """Build the uploads Flask app, wire the Receiver endpoints, and serve it.

    *publish* is injected so tests can substitute the MQ publisher.
    """
    prefix = '/uploads'
    receiver = Receiver(publish)
    app = FlaskWrapper('server')
    # (route suffix, endpoint name, handler, HTTP methods)
    routes = [
        ('config', 'config', receiver.send_server_supported_fields, ['GET']),
        ('users', 'user_upload', receiver.receive_user_data, ['POST']),
        ('snapshots', 'snapshots_upload', receiver.receive_snapshot_data, ['POST']),
    ]
    for suffix, endpoint_name, handler, methods in routes:
        app.add_endpoint(f'{prefix}/{suffix}', endpoint_name, handler, methods=methods)
    app.run(host=host, port=port)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,983
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/server/Receiver.py
|
import json
from flask import jsonify, request
from google.protobuf.json_format import MessageToDict
import numpy as np
from ..utils import UtilFunctions
from ..utils.BaMMI_pb2 import Snapshot, User
from ..utils.Constants import rabbit_mq_url
from ..utils.PubSuber import PubSuber
def publish_to_message_queue(user_data, snapshot, binary_type_data, array_type_data,
                             message_queue_url=rabbit_mq_url):
    """Serialize a snapshot (heavy fields spilled to files) and publish it to the MQ.

    The routing key is the dot-joined list of snapshot field names.
    """
    payload = prepare_data_for_queue(user_data['user_id'], snapshot, binary_type_data, array_type_data)
    routing_key = '.'.join(payload.keys())
    publisher = PubSuber(message_queue_url)  # TODO: Make it so we don't connect to the MQ each time, wrap in a class?
    publisher.init_exchange('snapshots_data', exchange_type='topic')
    publisher.publish_message(json.dumps({'user_data': user_data, 'snapshot_data': payload}), routing_key)
def convert_binary_fields_to_files(user_id, data, binary_type_data, array_type_data):
    """Spill binary/array snapshot fields to files under ../storage.

    :param user_id: owner of the snapshot, used to build the file path.
    :param data: a protobuf message exposing ListFields() and a datetime field.
    :param binary_type_data: field names whose payload is raw bytes.
    :param array_type_data: field names whose payload is a numeric array.
    :return: dict mapping each spilled field name to its file path.
    """
    field_to_file_path = {}
    # Hoist the membership test out of the loop: the original rebuilt the
    # combined list on every iteration; a set gives O(1) lookups.
    spill_fields = set(binary_type_data) | set(array_type_data)
    for descriptor, value in data.ListFields():
        field_name = descriptor.name
        if field_name in spill_fields:
            field_data = value.data
            file_path = UtilFunctions.build_path_for_files_from_data(
                UtilFunctions.get_true_relative_path(__file__, '../storage'),
                user_id, str(data.datetime), '.'.join((field_name, 'raw')))
            if field_name in binary_type_data:
                UtilFunctions.save_data_to_file(field_data, file_path, 'b')
            else:
                # dtype=float already yields float64; the old extra
                # astype('float') made a redundant copy before tofile.
                np.array(field_data, dtype=float).tofile(file_path)
            field_to_file_path[field_name] = file_path
    return field_to_file_path
def prepare_data_for_queue(user_id, data, binary_type_data, array_type_data):
    """Convert a snapshot message to a dict, replacing heavy payloads with file paths."""
    file_paths = convert_binary_fields_to_files(user_id, data, binary_type_data, array_type_data)
    message_dict = MessageToDict(data, preserving_proto_field_name=True)
    # Swap each spilled field's inline data for the path it was written to.
    for field_name, path in file_paths.items():
        message_dict[field_name]['data'] = path
    return message_dict
# TODO: In case data comes in other formats, convert it to a valid json
class Receiver:
    """Flask-facing handlers: accept user/snapshot uploads and hand them to a publisher."""

    def __init__(self, publish_function):
        # Injected publisher (e.g. publish_to_message_queue) so tests can stub it.
        self.publish_function = publish_function
        # Field groups; names are expected to match Snapshot protobuf fields.
        self.message_type_data = ['pose', 'feelings', 'datetime']
        self.binary_type_data = ['color_image']
        self.array_type_data = ['depth_image']
        # Maps user_id (str) -> user dict; filled by receive_user_data.
        self.known_users = {}

    def send_server_supported_fields(self):
        """Return a JSON list of every snapshot field this server accepts."""
        return jsonify([*self.message_type_data, *self.binary_type_data, *self.array_type_data])

    def receive_user_data(self):
        """Parse a serialized User protobuf from the request body and remember it."""
        user_data = request.data
        user = User()
        user.ParseFromString(user_data)
        user_dict = MessageToDict(user, preserving_proto_field_name=True)
        for field in user.DESCRIPTOR.fields:
            if field.name not in user_dict:
                # Handling case where zero-value enums are omitted - https://github.com/golang/protobuf/issues/258
                user_dict[field.name] = 0
        self.known_users[str(user.user_id)] = user_dict
        return jsonify(success=True)

    def receive_snapshot_data(self):
        """Parse a serialized Snapshot from the body and publish it for its user.

        Raises KeyError if the 'user-id' header names a user that was never
        registered via receive_user_data.
        """
        user_id = request.headers.get('user-id')
        snapshot_data = request.data
        snapshot = Snapshot()
        snapshot.ParseFromString(snapshot_data)
        self.publish_function(self.known_users[user_id], snapshot, self.binary_type_data, self.array_type_data)
        return jsonify(success=True)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,984
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/client/ProtoDriver.py
|
import gzip
import struct
from ..utils.BaMMI_pb2 import User, Snapshot
class ProtoDriver:
    """Reads length-prefixed User/Snapshot protobuf messages from a gzipped file."""

    def __init__(self, file_path):
        # The sample file is gzip-compressed binary protobuf.
        self.f = gzip.open(file_path, 'rb')
        self.user = None  # cached User message, parsed lazily by get_user_data

    def close(self):
        """Close the underlying file (safe to call more than once)."""
        if self.f:
            self.f.close()
            self.f = None

    def get_user_data(self):
        """Return the User message at the head of the file, caching it.

        :raises RuntimeError: if the file was closed before the user was read.
        """
        if self.user is None and not self.f:
            raise RuntimeError("User data wasn't saved before file closed")
        if self.user is None:  # If we got here, self.f is already opened
            user_data_length = _read_message_length(self.f)
            user = User()
            user.ParseFromString(self.f.read(user_data_length))
            self.user = user
        return self.user

    def get_user_data_ready_to_send(self):
        """Return the (cached) User message serialized back to bytes."""
        return self.get_user_data().SerializeToString()

    def generate_snapshot_data_ready_to_send(self, server_accepted_fields):
        """Yield serialized snapshots, clearing fields the server does not accept.

        Stops when the length prefix can no longer be read (EOF) or the file
        has been closed.
        """
        while self.f:
            snapshot_length = _read_message_length(self.f)
            if snapshot_length:
                snapshot = Snapshot()
                snapshot.ParseFromString(self.f.read(snapshot_length))
                for field in snapshot.ListFields():
                    field_name = field[0].name
                    if field_name not in server_accepted_fields:
                        snapshot.ClearField(field_name)
                yield snapshot.SerializeToString()
            else:  # EOF reached, no more snapshots
                return

    @staticmethod
    def get_data_content_type():
        """MIME type used when uploading the serialized messages."""
        return 'application/protobuf'
def _read_message_length(f):
    # Messages are prefixed with a 4-byte little-endian unsigned int length;
    # returns None at EOF (see _read_bytes_as_format_from_file).
    return _read_bytes_as_format_from_file(f, 4, 'I')
def _read_bytes_as_format_from_file(f, num_of_bytes, bytes_format, endian='little'):
"""
A relic from a time where reading binary data was the norm.
Helper function to read bytes from a file and parse them according to the given format.
:param f: An open file to read bytes from.
:param num_of_bytes: The number of bytes that is required to read.
:param bytes_format: The format which the bytes aligned to know how to unpack them.
:param endian: little/big, according to the data endianness.
:return: The data in the file according to the arguments given.
"""
if endian.lower() == 'little':
endian = '<'
elif endian.lower() == 'big':
endian = '>'
else:
raise ValueError("Endian should be 'little' or 'big'")
bts = f.read(num_of_bytes)
if len(bts) < num_of_bytes:
return None
return struct.unpack(f'{endian}{bytes_format}', bts)[0]
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,985
|
roypel/BaMMI
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
# Package metadata and pinned dependencies for the BaMMI distribution.
setup(
    name='BaMMI',
    version='0.1.9',
    author='Roy Peleg',
    description='Basic Mind-Machine Interface.',
    # src-style layout: importable packages live under the BaMMI/ directory.
    packages=find_packages(where='BaMMI'),
    package_dir={"": "BaMMI"},
    # NOTE(review): pytest/pytest-cov/codecov are test tooling pinned inside
    # install_requires (and pytest/pytest-cov repeated in tests_require) —
    # consider moving them to an extras_require; confirm before changing.
    install_requires=[
        'Click==7.0',
        'codecov==2.0.16',
        'Flask==1.1.1',
        'matplotlib==3.2.1',
        'numpy==1.22.0',
        'pika==1.1.0',
        'Pillow==9.0.1',
        'protobuf==3.15.0',
        'pytest==5.3.2',
        'pytest-cov==2.8.1',
        'pymongo==3.10.1',
        'requests==2.23.0'
    ],
    tests_require=['pytest', 'pytest-cov'],
    python_requires='>=3.8',
)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,986
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/utils/Constants.py
|
# Relative path under which uploaded snapshot blobs are stored.
storage_folder = 'BaMMI/storage'
# Default MongoDB connection string; 'mongo' presumably resolves to a
# docker-compose service name — TODO confirm.
mongodb_url = "mongodb://BaMMI:1337@mongo:27017"
# Default message-queue URL. NOTE(review): 'rabbitmq://' is not a standard
# scheme — confirm the project's MQ driver parses it.
rabbit_mq_url = 'rabbitmq://rabbitmq:5672/'
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,987
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/client/__main__.py
|
import sys
import click
from .client import upload_sample as upload
from ..utils.CLITemplate import log, main
@main.command()
@click.option('-h', '--host', default='127.0.0.1', type=str)
@click.option('-p', '--port', default=8000, type=int)
@click.argument('path', default='snapshot.mind.gz', type=click.Path(exists=True))
def upload_sample(host='127.0.0.1', port=8000, path='snapshot.mind.gz'):
    """Upload the sample file at *path* to the server at host:port and log the result."""
    # Click always supplies all three values, so the signature defaults are
    # documentation only — they previously disagreed with the decorator
    # default ('sample.mind.gz' vs 'snapshot.mind.gz'); keep them in sync.
    log(upload(host, port, path))
if __name__ == '__main__':
    # CLI entry point: run the click group; any uncaught error is logged
    # and turned into a non-zero exit code.
    try:
        main(prog_name='client', obj={})
    except Exception as error:
        log(f'ERROR: {error}')
        sys.exit(1)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,988
|
roypel/BaMMI
|
refs/heads/master
|
/BaMMI/parsers/__main__.py
|
import sys
import click
from . import ParserHandler
from ..utils.CLITemplate import log, main
# Shared handler used by both commands below.
# NOTE(review): instantiated at import time — confirm the constructor has no
# heavy side effects (connections, file I/O).
parser_handler = ParserHandler.ParserHandler()
@main.command()
@click.argument('parser_name', type=str)
@click.argument('raw_data_path', type=click.Path(exists=True))
def parse(parser_name, raw_data_path):
    """Run the named parser once over the raw data file and log its output."""
    result = parser_handler.parse(parser_name, raw_data_path)
    log(result)
@main.command('run-parser')
@click.argument('parser_name', type=str)
@click.argument('mq_url', type=str)
def run_parser(parser_name, mq_url):
    """Run the named parser as a service consuming from the message queue."""
    result = parser_handler.run_parser(parser_name, mq_url)
    log(result)
if __name__ == '__main__':
    # CLI entry point: run the click group; any uncaught error is logged
    # and turned into a non-zero exit code.
    try:
        main(prog_name='parsers', obj={})
    except Exception as error:
        log(f'ERROR: {error}')
        sys.exit(1)
|
{"/BaMMI/saver/Saver.py": ["/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/parsers/__init__.py": ["/BaMMI/parsers/ParserHandler.py"], "/BaMMI/api/__main__.py": ["/BaMMI/api/API.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/server/__main__.py": ["/BaMMI/server/Receiver.py", "/BaMMI/server/Server.py", "/BaMMI/utils/CLITemplate.py", "/BaMMI/utils/Constants.py"], "/BaMMI/utils/drivers/mq_drivers/__init__.py": ["/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py"], "/BaMMI/parsers/Context.py": ["/BaMMI/utils/__init__.py"], "/BaMMI/parsers/ParserHandler.py": ["/BaMMI/parsers/Context.py", "/BaMMI/utils/PubSuber.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/PubSuber.py": ["/BaMMI/utils/drivers/mq_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/utils/__init__.py": ["/BaMMI/utils/Connection.py"], "/BaMMI/api/API.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/DBWrapper.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/client/Reader.py": ["/BaMMI/client/ProtoDriver.py"], "/BaMMI/utils/DBWrapper.py": ["/BaMMI/utils/drivers/db_drivers/__init__.py", "/BaMMI/utils/UtilFunctions.py"], "/BaMMI/__init__.py": ["/BaMMI/server/Server.py", "/BaMMI/client/Reader.py"], "/BaMMI/utils/drivers/db_drivers/__init__.py": ["/BaMMI/utils/drivers/db_drivers/MongoDriver.py"], "/BaMMI/client/client.py": ["/BaMMI/client/Reader.py", "/BaMMI/utils/Connection.py"], "/BaMMI/saver/__main__.py": ["/BaMMI/saver/Saver.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/server/Server.py": ["/BaMMI/utils/APIServer.py", "/BaMMI/server/Receiver.py"], "/BaMMI/server/Receiver.py": ["/BaMMI/utils/__init__.py", "/BaMMI/utils/Constants.py", "/BaMMI/utils/PubSuber.py"], "/BaMMI/client/__main__.py": ["/BaMMI/client/client.py", "/BaMMI/utils/CLITemplate.py"], "/BaMMI/parsers/__main__.py": ["/BaMMI/parsers/__init__.py", "/BaMMI/utils/CLITemplate.py"]}
|
14,992
|
EduardaMarques/AgendaContatoApp
|
refs/heads/master
|
/model/Agenda.py
|
from model.Contato import Contato
class Agenda:
    """Contact book holding the contacts (contatos) of its owner (propietario)."""

    def __init__(self, propietario):
        self.propietario = propietario
        self.contatos = []

    def contarContatos(self):
        """Return how many contacts the agenda currently holds."""
        return len(self.contatos)

    def incluirContato(self, contato):
        """Append *contato* to the agenda."""
        self.contatos.append(contato)

    def listarContato(self):
        """Return the internal list of contacts."""
        return self.contatos

    def excluirContato(self, nome):
        """Remove the first contact whose person name equals *nome* (if any)."""
        for position, contato in enumerate(self.contatos):
            if contato.pessoa.nome == nome:
                del self.contatos[position]
                print("contato: " + nome + " removido com sucesso\n")
                break
|
{"/model/Agenda.py": ["/model/Contato.py"], "/App.py": ["/model/Agenda.py", "/model/Contato.py"]}
|
14,993
|
EduardaMarques/AgendaContatoApp
|
refs/heads/master
|
/AGENDA/ClassPessoa.py
|
# Bug fix: this file was a pasted interactive-interpreter session (version
# banner plus '>>> ' prompt), so it was not importable Python at all.
# Below is the class the session defined, as a proper module.
class Pessoa:
    """A person record: name, birth date and e-mail address."""

    def __init__(self, nome, nascimento, email):
        self.nome = nome
        self.nascimento = nascimento
        self.email = email
|
{"/model/Agenda.py": ["/model/Contato.py"], "/App.py": ["/model/Agenda.py", "/model/Contato.py"]}
|
14,994
|
EduardaMarques/AgendaContatoApp
|
refs/heads/master
|
/App.py
|
# encoding: utf-8
from model.Agenda import Agenda
from model.Contato import Contato
from model.Pessoa import Pessoa
from model.Telefone import Telefone
import json
import datetime
from datetime import date
def CriarAgenda():
    """Interactively prompt for the owner's data and return a new Agenda.

    Re-prompts from the top whenever the typed values raise ValueError
    (non-integer input or an impossible calendar date).
    """
    while(True):
        try:
            print(">---Criar Nova Agenda---<")
            nome = str(input("\nDigite o nome do propietário da agenda:\n"))
            dia = int(input("Digite o dia do seu nascmento:"))
            mes = int(input("Digite o numero do mes em que você nasceu:"))
            ano = int(input("Digite o ano em que você nasceu:"))
            nascimento = date(ano,mes,dia)
            email = str(input("Digite o email do propietario: "))
            propietario = Pessoa(nome,nascimento,email)
            agenda = Agenda(propietario)
            return agenda
        except ValueError:
            print("\nVocê digitou um valor inválido tente novamente\nDica: Digite um valor inteiro e um dia possivel no ano:")
def _lerTelefone():
    # Prompt the three integer parts of a phone number and build a Telefone.
    # ValueError from int() propagates so the caller's retry loop can re-ask.
    numero = int(input("\nDigite o numero do telefone:"))
    ddd = int(input("Digite o ddd do telefone:"))
    codigoPais = int(input("Digie o codigo do Pais\n"))
    return Telefone(numero, ddd, codigoPais)


def incluirContato(agenda):
    """Interactively build a Contato (person + one or more phones) and add it to *agenda*.

    Decomposition: the three phone prompts were duplicated verbatim in two
    places; they now live in the _lerTelefone helper.
    """
    # Build the Pessoa object
    nome = str(input("Digite o nome do conntato:"))
    trueAuxiliar = True
    telefones = []
    while(trueAuxiliar):
        try:
            dia = int(input("Digite o dia do seu nascmento:"))
            mes = int(input("Digite o numero do mes em que você nasceu:"))
            ano = int(input("Digite o ano em que você nasceu:"))
            nascimento = date(ano,mes,dia)
            email = str(input("Digite o email do contato:"))
            pessoa = Pessoa(nome,nascimento,email)
            # First phone for the contact
            print("\nAdicionar Telefone:")
            telefones.append(_lerTelefone())
            trueAuxiliar = False
        except ValueError:
            print("\n!!!Valor Inválido, Dica: Digite um valor inteiro e que esteja dentro da quantidade de meses e dias possiveis!!!\n")
    # Optionally add more phones until the user chooses to save
    while(True):
        try:
            resposta = int(input("Digite:\n 1-Incluir outro telefone;\n 2- Sair salvar contato."))
            if (resposta == 1):
                print("Adicionar outro telefone")
                telefones.append(_lerTelefone())
            elif (resposta == 2):
                break
            else:
                print("\n!!!Resposta Inválida!!!\n")
        except ValueError:
            print("\n!!!Digite um número INTEIRO!!!\n")
    # Build the Contato and store it in the agenda
    criacao = date.today()
    contato = Contato(criacao,pessoa,telefones)
    agenda.incluirContato(contato)
def listarContatos(agenda):
    """Print every contact of *agenda* with its person data and numbered phones.

    BUG FIX: the phone counter was initialized to 1 but never incremented,
    so every phone printed as "Telefone1".
    """
    for contato in agenda.listarContato():
        print("Contato: " + contato.pessoa.nome)
        print("nasc: " + str(contato.pessoa.nascimento))
        print("email: " + contato.pessoa.email)
        contador = 1
        print("::::TELEFONES::::")
        for telefone in contato.telefones:
            print("\nTelefone" + str(contador))
            print("nº:", telefone.numero)
            print("DDD:", telefone.ddd)
            print("Codigo do País:", telefone.codigoPais)
            contador += 1
def removerContato(agenda):
    """Ask for a contact name and delegate its removal to *agenda*."""
    alvo = input("Digite o nome do contato a ser removido:")
    agenda.excluirContato(alvo)
def buscarContato(agenda):
    """Search *agenda* by exact name and print the first match's details.

    BUG FIX: the phone counter was never incremented (all phones printed
    as "Telefone1"); also removed the `auxiliar` counter, which was
    incremented but never read.
    """
    nome = input("Digite o nome do contato a ser buscado:")
    for contato in agenda.contatos:
        if contato.pessoa.nome == nome:
            print("Contato: " + contato.pessoa.nome)
            print("nasc: " + str(contato.pessoa.nascimento))
            print("email: " + contato.pessoa.email)
            contador = 1
            print("::::TELEFONES::::")
            for telefone in contato.telefones:
                print("\nTelefone" + str(contador))
                print("nº:", telefone.numero)
                print("DDD:", telefone.ddd)
                print("Codigo do País:", telefone.codigoPais)
                contador += 1
            break
def quantidadeContatos(agenda):
    """Print how many contacts *agenda* currently holds."""
    total = agenda.contarContatos()
    print(total)
def default_parser(obj):
    """Fallback serializer for json.dumps.

    Objects with an attribute dict serialize as that dict, date/datetime
    values as ISO-8601 strings, everything else via str().

    Fixes: `hasattr` instead of a truthiness test on `__dict__` (an object
    whose dict is empty now serializes as {} rather than str(obj)), and
    `isinstance(..., (datetime, date))` instead of the exact
    `type(obj) == datetime` check, which missed date objects and subclasses.
    """
    if hasattr(obj, "__dict__"):
        return obj.__dict__
    if isinstance(obj, (datetime, date)):
        return obj.isoformat()
    return str(obj)
def extrairSalvar(agenda):
    """Serialize *agenda* to JSON and write it to agenda.json (UTF-8).

    Uses a `with` block so the file is closed even if the write raises;
    the original opened/closed manually and kept an unused local
    (`auxiliarContato`).  json.dumps already returns str, so the extra
    str() wrapper was dropped.
    """
    conteudo = json.dumps(agenda, default=default_parser, indent=4)
    with open("agenda.json", "w", encoding="utf-8") as arquivo:
        arquivo.write(conteudo)
def main():
    """Entry point: load agenda.json if present, else create a new agenda,
    then run the interactive menu until the user saves and quits.

    BUG FIX: the phone list used to be created once *outside* the contact
    loop, so every loaded contact accumulated the phones of all previous
    contacts.  A fresh list is now built per contact.  File reading also
    uses `with` so the handle is closed on parse errors.
    """
    agenda = None
    try:
        with open("agenda.json", "r", encoding="utf-8") as arquivo:
            dados = json.loads(arquivo.read())
        contatos = []
        for registro in dados["contatos"]:
            # Fresh per-contact phone list (see BUG FIX above).
            telefones = [
                Telefone(t["numero"], t["ddd"], t["codigoPais"])
                for t in registro["telefones"]
            ]
            pessoa = Pessoa(
                registro["pessoa"]["nome"],
                registro["pessoa"]["nascimento"],
                registro["pessoa"]["email"],
            )
            contatos.append(Contato(registro["criacao"], pessoa, telefones))
        # NOTE(review): "propietario" is the serialized owner dict; it is
        # passed straight to Agenda(), mirroring the original behavior.
        agenda = Agenda(dados["propietario"])
        agenda.contatos = contatos
    except FileNotFoundError:
        agenda = CriarAgenda()
    while True:
        try:
            escolha = int(input("\nDigite o número correspondente a opção deejada:\n\n 1-Criar Agenda;\n 2-Incluir Contato;\n 3-Listar Contatos;\n 4-Remover Contato;\n 5-Buscar Contato; \n 6- Contar Quantidade de Contatos;\n 7- Sair e salvar.\n---->>"))
            if (escolha == 1):
                agenda = CriarAgenda()
            elif (escolha == 2):
                incluirContato(agenda)
            elif (escolha == 3):
                listarContatos(agenda)
            elif (escolha == 4):
                removerContato(agenda)
            elif (escolha == 5):
                buscarContato(agenda)
            elif (escolha == 6):
                quantidadeContatos(agenda)
            elif (escolha == 7):
                extrairSalvar(agenda)
                break
            else:
                print("Opção Inválida")
        except ValueError:
            print("\n---\n!!!!Digite um número inteiro!!!\n---")
# Run the interactive menu only when executed as a script.
if (__name__ == "__main__"):
    main()
|
{"/model/Agenda.py": ["/model/Contato.py"], "/App.py": ["/model/Agenda.py", "/model/Contato.py"]}
|
14,995
|
EduardaMarques/AgendaContatoApp
|
refs/heads/master
|
/model/Contato.py
|
class Contato():
    """A contact: its owner (Pessoa), phone list and creation date.

    Fixes two defects: the phone list was stored as ``self.telefone``
    while ``listarTelefones`` returned ``self.telefones`` (AttributeError
    on every call), and the mutable-default-argument ``telefones=[]`` was
    shared across all instances.
    """

    def __init__(self, pessoa, telefones=None):
        # Creation stamp kept as a string, as before.
        self.criacao = str(datetime.date.today())
        self.pessoa = pessoa
        # Fresh list per instance; attribute name now matches the getter.
        self.telefones = telefones if telefones is not None else []

    def listarTelefones(self):
        """Return this contact's list of phones."""
        return self.telefones
|
{"/model/Agenda.py": ["/model/Contato.py"], "/App.py": ["/model/Agenda.py", "/model/Contato.py"]}
|
15,010
|
kuzmich/blogosphere
|
refs/heads/master
|
/blog/views.py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Post
@login_required
def personal_feed(request):
    """Render the feed page with unread posts from the user's subscribed
    blogs, newest first."""
    viewer = request.user
    unread = (
        Post.objects
        .filter(blog__in=viewer.subscriptions.all())
        .exclude(read_by=viewer)
        .order_by('-created')
    )
    return render(request, 'blog/feed.html', {'posts': unread})
|
{"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/tests.py": ["/blog/models.py"]}
|
15,011
|
kuzmich/blogosphere
|
refs/heads/master
|
/blog/models.py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
class Blog(models.Model):
    """A user's blog; other users may subscribe to it."""
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax —
    # confirm the project pins Django < 2.0.
    user = models.ForeignKey('auth.User', verbose_name='пользователь')
    name = models.CharField('название', max_length=150)
    # Followers of this blog; reverse accessor is user.subscriptions.
    subscribers = models.ManyToManyField('auth.User',
                                         related_name='subscriptions',
                                         verbose_name='подписчики',
                                         blank=True)
    class Meta:
        verbose_name = 'блог'
        verbose_name_plural = 'блоги'
    def __str__(self):
        return '{0.name} ({0.user})'.format(self)
class Post(models.Model):
    """A blog entry; tracks which users have already read it."""
    blog = models.ForeignKey(Blog, related_name='posts', verbose_name='блог')
    title = models.CharField('заголовок', max_length=100)
    text = models.TextField('содержание')
    created = models.DateTimeField('создан', auto_now_add=True)
    # Users who read this post; used by the feed view to exclude read items.
    read_by = models.ManyToManyField('auth.User',
                                     related_name='read_posts',
                                     verbose_name='прочитано пользователями',
                                     blank=True)
    class Meta:
        verbose_name = 'публикация'
        verbose_name_plural = 'публикации'
    def __str__(self):
        return '{0.title} ({0.blog})'.format(self)
# Explicit through-models for the two M2M tables, used by the signal below
# and by the tests.
Subscriptions = Blog.subscribers.through
PostReadBy = Post.read_by.through
@receiver(m2m_changed, sender=Subscriptions)
def subscribers_changed(sender, **kwargs):
    """When users unsubscribe from a blog, drop their read-marks on its posts."""
    if kwargs['action'] == 'post_remove':
        users = kwargs['pk_set']
        blog = kwargs['instance']
        PostReadBy.objects.filter(post__in=blog.posts.all(), user__in=users).delete()
@receiver(post_save, sender=Post)
def new_post_created(sender, **kwargs):
    """Email every subscriber with a non-empty address about a new post."""
    from django.core.mail import send_mail
    if kwargs['created']:
        post = kwargs['instance']
        blog = post.blog
        # One message per subscriber — presumably to avoid exposing other
        # recipients' addresses; confirm this is intentional (it sends N mails).
        for subscriber in blog.subscribers.exclude(email=''):
            send_mail('В блоге {} новая публикая: {}'.format(blog, post.title),
                      post.text,
                      from_email='news@blogosphere.com',
                      recipient_list=[subscriber.email])
|
{"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/tests.py": ["/blog/models.py"]}
|
15,012
|
kuzmich/blogosphere
|
refs/heads/master
|
/blog/admin.py
|
from django.contrib import admin
from .models import *
class BlogAdmin(admin.ModelAdmin):
    # Columns shown on the blog changelist page.
    list_display = ['name', 'user']
class PostAdmin(admin.ModelAdmin):
    # Columns plus a sidebar filter by blog owner.
    list_display = ['blog', 'title', 'text', 'created']
    list_filter = ['blog__user']
admin.site.register(Blog, BlogAdmin)
admin.site.register(Post, PostAdmin)
|
{"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/tests.py": ["/blog/models.py"]}
|
15,013
|
kuzmich/blogosphere
|
refs/heads/master
|
/blog/tests.py
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from .models import *
class BlogTestCase(TestCase):
    """Tests for the subscription / read-mark signal handlers in models.py."""
    def setUp(self):
        from django.contrib.auth.models import User
        self.john = User.objects.create(username='john', email='john@lennon.com')
        self.george = User.objects.create(username='george', email='george@mail.com')
        self.fan = User.objects.create(username='fan', email='fan@yahoo.com')
        self.jblog = Blog.objects.create(user=self.john)
    def test_read_marks_deletion(self):
        """Unsubscribing deletes only that user's read-marks for the blog."""
        self.jblog.subscribers.add(self.fan, self.george)
        post = self.jblog.posts.create(title='Post 1', text='Всем привет!')
        post.read_by.add(self.fan, self.george)
        self.assertEqual(post.read_by.count(), 2)
        self.jblog.subscribers.remove(self.george)
        self.assertEqual(post.read_by.count(), 1)
        self.assertEqual(PostReadBy.objects.filter(post=post, user=self.george).count(), 0)
        self.assertEqual(PostReadBy.objects.filter(post=post, user=self.fan).count(), 1)
    def test_send_emails_to_subscribers(self):
        """Creating a post emails every subscriber with a non-empty address."""
        from django.core import mail
        self.assertEqual(len(mail.outbox), 0)
        self.jblog.subscribers.add(self.fan, self.george)
        post = self.jblog.posts.create(title='Post 1', text='Всем привет!')
        self.assertEqual(len(mail.outbox), 2)
|
{"/blog/views.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/tests.py": ["/blog/models.py"]}
|
15,024
|
ezik/git_hub_api_tests
|
refs/heads/master
|
/config.py
|
class Config:
    """Static test configuration: GitHub API endpoints and credentials."""

    def __init__(self):
        # Base REST endpoint plus the two repositories exercised by tests.
        self.url = dict(
            base="https://api.github.com/repos",
            ursus_repo="/ursusrepublic/django_test",
            ezik_repo="/ezik/selenium_python_training",
        )
        # Placeholder credentials; real ones are supplied out of band.
        self.auth = dict(username="username", password="password")
|
{"/conftest.py": ["/config.py"]}
|
15,025
|
ezik/git_hub_api_tests
|
refs/heads/master
|
/test_create_pull_request.py
|
import requests
from requests.auth import HTTPBasicAuth
import random
import re
def test_create_pull_req(app_config):
    """This test is intended to create pull request. As precondition user has to:
    - Prepare branch basing on existing branches in repo
    - Prepare commit in new branch
    - Create pull request for the commit
    I used own repo on github to ensure that I have rights for pull requests
    Hope comments will add more transparency on what is happening inside
    Assertions in between are used to ensure that all data is created"""
    base_url = app_config.url["base"]
    repo = app_config.url["ezik_repo"]
    username = app_config.auth["username"]
    password = app_config.auth["password"]
    print("\n=====START PRECONDITION STEPS=====")
    # Step 1: pick an existing branch (ref + sha) to fork from.
    get_branches_list_response = requests.get(base_url + repo + "/git/refs/heads")
    branches_list_data = get_branches_list_response.json()
    assert get_branches_list_response.status_code == 200
    sha_list = list()
    for sha in branches_list_data:
        sha_list.append(tuple((sha["ref"], sha["object"]["sha"])))
    assert sha_list[0][0] == "refs/heads/branch_requests_test"
    # Step 2: create a uniquely named branch off that sha.
    ref_branch = "{0}_{1}".format(sha_list[0][0], random.choice(range(101, 10000))) # generate ref branch
    head_branch_name = re.sub(r"refs/heads/", "", ref_branch, flags=re.I) # substring branch name from ref branch
    print("HEAD BRANCH NAME...\n", head_branch_name)
    create_branch_response = requests.post(base_url + repo + "/git/refs",
                                           json={
                                               "ref": ref_branch,
                                               "sha": sha_list[0][1]
                                           }, auth=HTTPBasicAuth(username=username, password=password))
    assert create_branch_response.status_code == 201
    new_branch_data = create_branch_response.json()
    sha_latest_commit = new_branch_data["object"]["sha"]
    print("LATEST SHA COMMIT...\n", sha_latest_commit)
    # Step 3: create a commit on the new branch reusing the parent's tree.
    sha_base_tree_response = requests.get(base_url + repo + "/git/commits/{}".format(sha_latest_commit))
    assert sha_base_tree_response.status_code == 200
    sha_base_tree_json = sha_base_tree_response.json()
    sha_base_tree = sha_base_tree_json["tree"]["sha"]
    commit_parent = sha_base_tree_json["parents"][0]["sha"]
    create_commit_response = requests.post(base_url + repo + "/git/commits",
                                           json={
                                               "message": "freaking commit",
                                               "tree": sha_base_tree,
                                               "parents": [commit_parent]
                                           }, auth=HTTPBasicAuth(username=username, password=password))
    create_commit_data = create_commit_response.json()
    assert create_commit_data["author"]["name"] == "ezik"
    assert create_commit_data["message"] == "freaking commit"
    print("=====END PRECONDITION STEPS=====")
    # Step 4 (the actual test): open a pull request from the new branch.
    create_pull_req_response = requests.post(base_url + repo + "/pulls",
                                             json={
                                                 "title": "Amazing new feature",
                                                 "body": "Please pull this in!",
                                                 "head": head_branch_name,
                                                 "base": "master"
                                             }, auth=HTTPBasicAuth(username=username, password=password))
    pull_req_data = create_pull_req_response.json()
    assert create_pull_req_response.status_code == 201
    assert pull_req_data["state"] == "open"
    assert pull_req_data["body"] == "Please pull this in!"
    assert pull_req_data["head"]["ref"] == head_branch_name
|
{"/conftest.py": ["/config.py"]}
|
15,026
|
ezik/git_hub_api_tests
|
refs/heads/master
|
/conftest.py
|
from pytest import fixture
from config import Config
@fixture(scope='session', autouse=True)
def app_config():
    """Session-wide Config instance shared by every test."""
    return Config()
|
{"/conftest.py": ["/config.py"]}
|
15,027
|
ezik/git_hub_api_tests
|
refs/heads/master
|
/test_get_requests.py
|
import requests
def test_get_open_pull_reqs(app_config):
    """The ursus repo must expose exactly two open pull requests."""
    repo_url = app_config.url["base"] + app_config.url["ursus_repo"]
    data = requests.get(repo_url + "/pulls?state=open").json()
    assert len(data) == 2
    first = data[0]
    assert first["url"] == repo_url + "/pulls/5"
    assert first["state"] == "open"
    assert first["title"] == "random comment added"
def test_get_closed_pull_reqs(app_config):
    """The ursus repo must expose exactly one closed pull request."""
    repo_url = app_config.url["base"] + app_config.url["ursus_repo"]
    data = requests.get(repo_url + "/pulls?state=closed").json()
    assert len(data) == 1
    first = data[0]
    assert first["url"] == repo_url + "/pulls/6"
    assert first["state"] == "closed"
    assert first["title"] == "we don't need admin.py"
def test_get_branches_list(app_config):
    """The ursus repo must report exactly 30 branches."""
    repo_url = app_config.url["base"] + app_config.url["ursus_repo"]
    payload = requests.get(repo_url + "/branches").json()
    all_branches = [branch["name"] for branch in payload]
    assert len(all_branches) == 30
|
{"/conftest.py": ["/config.py"]}
|
15,028
|
ezik/git_hub_api_tests
|
refs/heads/master
|
/test_create_an_issue.py
|
import requests
from requests.auth import HTTPBasicAuth
def test_create_new_issue(app_config):
    """POST a new issue to the ursus repo and verify the echoed fields."""
    repo_url = app_config.url["base"] + app_config.url["ursus_repo"]
    creds = HTTPBasicAuth(username=app_config.auth["username"],
                          password=app_config.auth["password"])
    payload = {
        "title": "Issue title for test",
        "body": "Smth doesn't work!",
        "labels": ["test", "amazing", "new_issue"],
        "assignees": ["django_test"]
    }
    issue = requests.post(repo_url + "/issues", json=payload, auth=creds).json()
    assert issue["repository_url"] == repo_url
    assert issue["title"] == "Issue title for test"
    assert issue["body"] == "Smth doesn't work!"
|
{"/conftest.py": ["/config.py"]}
|
15,256
|
TrinaDutta95/BotResponse_sentiment
|
refs/heads/master
|
/setting.py
|
# One-off NLTK setup helper.
import nltk
# NOTE(review): download() with no arguments opens the interactive
# downloader; the commented alternative below fetches only the nps_chat
# corpus that bot_response.py actually needs — confirm which is intended.
nltk.download()
#nltk.download('nps_chat')
#posts = nltk.corpus.nps_chat.xml_posts()[:10000]
|
{"/web_flask.py": ["/bot_response.py"]}
|
15,257
|
TrinaDutta95/BotResponse_sentiment
|
refs/heads/master
|
/web_flask.py
|
import bot_response as br
from flask import Flask, request
from flask import jsonify
app = Flask(__name__)
@app.route("/", methods=["GET"])
def generate_response():
    """Read ?user_res=..., run the sentiment pipeline, return the bot line."""
    user_text = request.args.get("user_res")
    print(user_text)
    reply = br.sentence_processing(user_text)
    return jsonify(additional_line=reply)
# Start the Flask development server only when run directly.
if __name__ == "__main__":
    app.run()
|
{"/web_flask.py": ["/bot_response.py"]}
|
15,258
|
TrinaDutta95/BotResponse_sentiment
|
refs/heads/master
|
/bot_response.py
|
import string
from collections import Counter
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import random
# import matplotlib.pyplot as plt
# reading text from a file and converting them to lower case with removal of punctuations
def read_line(text):
    """Lower-case *text* and strip all ASCII punctuation characters."""
    strip_punct = str.maketrans('', '', string.punctuation)
    return text.lower().translate(strip_punct)
# tokenizing the text
def preprocess_line(cleaned_text):
    """Tokenize *cleaned_text* and drop English stop words.

    PERF FIX: the stop-word list is loaded from the NLTK corpus once and
    held in a set; the original called ``stopwords.words('english')``
    (a corpus read returning a list) for every single token, making the
    filter O(tokens x stopwords) with repeated I/O.
    """
    tokenized_words = word_tokenize(cleaned_text, "english")
    stop_words = set(stopwords.words('english'))
    return [word for word in tokenized_words if word not in stop_words]
# identifying emotion
def emotion_detect(sentences):
    """Collect the emotions whose trigger words appear in *sentences*.

    emotions.txt lines look like ``'word': emotion,``; newlines, commas and
    apostrophes are stripped before splitting on ':'.  The emotion keeps its
    leading space, exactly as the raw file yields it.
    """
    matched = []
    with open('emotions.txt', 'r') as file:
        for raw in file:
            entry = raw.replace("\n", '').replace(",", '').replace("'", '').strip()
            word, emotion = entry.split(':')
            if word in sentences:
                matched.append(emotion)
    return matched
def sentiment_analyse(sentiment_text):
    """Classify *sentiment_text* as "positive", "negative" or "neutral"
    using VADER polarity scores (neg vs pos, ties are neutral)."""
    scores = SentimentIntensityAnalyzer().polarity_scores(sentiment_text)
    if scores['neg'] > scores['pos']:
        return "negative"
    if scores['pos'] > scores['neg']:
        return "positive"
    return "neutral"
# type of text analysis
posts = nltk.corpus.nps_chat.xml_posts()[:10000]
def dialogue_act_features(post):
    """Bag-of-words feature dict for the NPS-chat dialogue-act classifier."""
    return {'contains({})'.format(token.lower()): True
            for token in word_tokenize(post)}
def bot_response(sentiment):
    """Return a random canned reply matching *sentiment*.

    "negative" and "positive" have dedicated pools; any other value falls
    back to the neutral pool.
    """
    positive_replies = [
        "Well said.", "You are thinking in the right direction.",
        "Very well thought.", "That’s great.",
        "It sounds like you really know what the problems are.",
        "Nice, let’s talk about that a little.",
        "It seems like you’ve been thinking about this a lot."]
    neutral_replies = [
        "That’s okay.", "Okay!", "Okay, let’s keep going.",
        "That’s really understandable. "]
    negative_replies = [
        "I understand how you feel.", "I see how that can be frustrating.",
        "That must be really frustrating.", "This is really bothering you."]
    by_sentiment = {"negative": negative_replies, "positive": positive_replies}
    return random.choice(by_sentiment.get(sentiment, neutral_replies))
"""
# Plotting the emotions on the graph
fig, ax1 = plt.subplots()
ax1.bar(w.keys(), w.values())
fig.autofmt_xdate()
plt.savefig('graph.png')
plt.show()
"""
# processing sentence to get emotions/sentiments and bots response
def _get_dialogue_classifier():
    """Train the NPS-chat dialogue-act classifier once and cache it.

    PERF FIX: the original re-built the feature sets for ~10k corpus posts
    and re-trained a NaiveBayes classifier on *every* incoming sentence.
    Training is deterministic for fixed data, so caching preserves output.
    """
    cached = getattr(_get_dialogue_classifier, "_classifier", None)
    if cached is None:
        featuresets = [(dialogue_act_features(post.text), post.get('class'))
                       for post in posts]
        size = int(len(featuresets) * 0.1)
        # The first 10% used to be split off as an (unused) test set.
        cached = nltk.NaiveBayesClassifier.train(featuresets[size:])
        _get_dialogue_classifier._classifier = cached
    return cached
def sentence_processing(text):
    """Build the bot's reply for a user utterance.

    Short inputs (<= 25 words) get a sentiment-based canned reply; longer
    ones get a generic "long response" line.  If the utterance classifies
    as a wh-question, a refocusing sentence is appended.
    """
    cleaned_text = read_line(text)
    words = cleaned_text.split()
    if len(words) <= 25:
        final_words = preprocess_line(cleaned_text)
        # Emotion tally — currently unused, kept for behavior parity
        # (it still reads emotions.txt).
        w = Counter(emotion_detect(final_words))
        sentiment = sentiment_analyse(cleaned_text)
        bot_res1 = bot_response(sentiment)
    else:
        long_res = ["It sounds like you really know what the problems are.", "Nice, let’s talk about that a little.",
                    "It seems like you’ve been thinking about this a lot.",
                    "Struggling with communication is difficult"]
        bot_res1 = random.choice(long_res)
    # Dialogue-act classification of the raw text (cached classifier).
    sentence_type = _get_dialogue_classifier().classify(dialogue_act_features(text))
    bot_res2 = ""
    if sentence_type == 'whQuestion':
        bot_res2 = "Let's focus more on the session."
    return bot_res1 + bot_res2
"""
if __name__=="__main__":
text = input("User: ")
print(sentence_processing(text))
"""
|
{"/web_flask.py": ["/bot_response.py"]}
|
15,273
|
grubberr/dog_and_cats
|
refs/heads/master
|
/utils.py
|
import sys
import datetime
from models import date_format
def get_birthday_or_exit(birthday):
    """Parse *birthday* with the project's date_format, or exit(1) on failure."""
    try:
        parsed = datetime.datetime.strptime(birthday, date_format)
    except ValueError:
        print("incorrect birthday: %s" % birthday)
        sys.exit(1)
    else:
        return parsed
|
{"/utils.py": ["/models.py"], "/users.py": ["/models.py", "/utils.py"], "/pets.py": ["/models.py", "/utils.py"]}
|
15,274
|
grubberr/dog_and_cats
|
refs/heads/master
|
/users.py
|
#!/Users/ant/dog_and_cats/env/bin/python3
import sys
import argparse
import datetime
from mongoengine.errors import ValidationError
from models import User, date_format, Pet, Cat, Dog
from utils import get_birthday_or_exit
def main(args):
    """Dispatch parsed CLI *args* to the matching user operation.

    Subcommands: add, del, update, list, list_pets, top.
    """
    if args.subparser == 'add':
        birthday = get_birthday_or_exit(args.birthday)
        u = User(
            first_name=args.first_name,
            last_name=args.last_name,
            birthday=birthday)
        u.save()
        print("user created id = '%s'" % u.pk)
    elif args.subparser == 'del':
        try:
            u = User.objects.get(id=args.pk)
        except (User.DoesNotExist, ValidationError):
            # ValidationError covers malformed ObjectId strings.
            print("user not found id = '%s'" % args.pk)
        else:
            u.delete()
            print("user removed id = '%s'" % u.pk)
    elif args.subparser == 'update':
        try:
            u = User.objects.get(id=args.pk)
        except (User.DoesNotExist, ValidationError):
            print("user not found id = '%s'" % args.pk)
        else:
            # Only overwrite the fields actually supplied on the CLI.
            if args.first_name:
                u.first_name = args.first_name
            if args.last_name:
                u.last_name = args.last_name
            if args.birthday:
                birthday = get_birthday_or_exit(args.birthday)
                u.birthday = birthday
            u.save()
            print("user updated id = '%s'" % u.pk)
    elif args.subparser == 'list':
        if args.pk:
            try:
                u = User.objects.get(id=args.pk)
            except (User.DoesNotExist, ValidationError):
                print("user not found id = '%s'" % args.pk)
            else:
                print("pk = %s" % u.pk)
                print("first_name = %s" % u.first_name)
                print("last_name = %s" % u.last_name)
                print("birthday = %s" % u.birthday.strftime(date_format))
        else:
            for u in User.objects:
                print(u)
    elif args.subparser == 'list_pets':
        try:
            u = User.objects.get(id=args.pk)
        except (User.DoesNotExist, ValidationError):
            print("user not found id = '%s'" % args.pk)
            sys.exit(1)
        print("User: %s %s has next pets:" % (u.first_name, u.last_name))
        for p in Pet.objects(owner=u):
            if isinstance(p, Cat):
                print("Cat: ", p)
            elif isinstance(p, Dog):
                print("Dog: ", p)
    elif args.subparser == 'top':
        # Rank owners by pet count, most pets first.
        from collections import defaultdict
        TOP = defaultdict(list)
        for p in Pet.objects:
            TOP[p.owner].append(p)
        res = list(TOP.items())
        res.sort(key=lambda x:len(x[1]), reverse=True)
        for (u, pets) in res:
            print("%s %s" % (u.first_name, u.last_name))
            for p in pets:
                print("\t", p.name)
if __name__ == '__main__':
    # CLI wiring: one sub-parser per user operation.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='subparser')
    parser_add = subparsers.add_parser('add')
    parser_add.add_argument('--first_name', required=True)
    parser_add.add_argument('--last_name', required=True)
    parser_add.add_argument('--birthday', required=True)
    parser_update = subparsers.add_parser('update')
    parser_update.add_argument('--pk', required=True)
    parser_update.add_argument('--first_name')
    parser_update.add_argument('--last_name')
    parser_update.add_argument('--birthday')
    parser_del = subparsers.add_parser('del')
    parser_del.add_argument('--pk', required=True)
    parser_list = subparsers.add_parser('list')
    parser_list.add_argument('--pk')
    parser_list_pets = subparsers.add_parser('list_pets')
    parser_list_pets.add_argument('--pk', required=True)
    parser_top = subparsers.add_parser('top')
    args = parser.parse_args()
    # No subcommand given: show usage and fail.
    if args.subparser is None:
        parser.print_help()
        sys.exit(1)
    main(args)
|
{"/utils.py": ["/models.py"], "/users.py": ["/models.py", "/utils.py"], "/pets.py": ["/models.py", "/utils.py"]}
|
15,275
|
grubberr/dog_and_cats
|
refs/heads/master
|
/pets.py
|
#!/Users/ant/dog_and_cats/env/bin/python3
import sys
import argparse
import datetime
from mongoengine.errors import ValidationError
from models import User, Pet, Dog, Cat, date_format
from utils import get_birthday_or_exit
def main(args):
    """Dispatch parsed CLI *args* to the matching pet operation.

    Subcommands: add, del, update, list.
    """
    if args.subparser == 'add':
        birthday = get_birthday_or_exit(args.birthday)
        try:
            u = User.objects.get(id=args.owner)
        except (User.DoesNotExist, ValidationError):
            # ValidationError covers malformed ObjectId strings.
            print("owner not found id = '%s'" % args.owner)
            sys.exit(1)
        if args.type == 'dog':
            p = Dog(name=args.name, birthday=birthday, owner=u)
            p.save()
            print("Dog created id = '%s'" % p.pk)
        elif args.type == 'cat':
            p = Cat(name=args.name, birthday=birthday, owner=u)
            p.save()
            print("Cat created id = '%s'" % p.pk)
    elif args.subparser == 'del':
        try:
            p = Pet.objects.get(id=args.pk)
        except (Pet.DoesNotExist, ValidationError):
            print("pet not found id = '%s'" % args.pk)
        else:
            p.delete()
            print("pet removed id = '%s'" % p.pk)
    elif args.subparser == 'update':
        try:
            p = Pet.objects.get(id=args.pk)
        except (Pet.DoesNotExist, ValidationError):
            print("pet not found id = '%s'" % args.pk)
        else:
            # Only overwrite the fields actually supplied on the CLI.
            if args.name:
                p.name = args.name
            if args.birthday:
                p.birthday = get_birthday_or_exit(args.birthday)
            p.save()
            print("pet updated id = '%s'" % p.pk)
    elif args.subparser == 'list':
        if args.pk:
            try:
                p = Pet.objects.get(id=args.pk)
            except (Pet.DoesNotExist, ValidationError):
                print("pet not found id = '%s'" % args.pk)
            else:
                # Print the concrete kind before the detail lines.
                if isinstance(p, Cat):
                    print("Cat")
                elif isinstance(p, Dog):
                    print("Dog")
                print("pk = %s" % p.pk)
                print("name = %s" % p.name)
                print("birthday = %s" % p.birthday.strftime(date_format))
        else:
            for p in Pet.objects:
                if isinstance(p, Cat):
                    print("Cat: ", p)
                elif isinstance(p, Dog):
                    print("Dog: ", p)
if __name__ == '__main__':
    # CLI wiring: one sub-parser per pet operation.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='subparser')
    parser_add = subparsers.add_parser('add')
    parser_add.add_argument('--type', choices=['cat', 'dog'], required=True)
    parser_add.add_argument('--name', required=True)
    parser_add.add_argument('--birthday', required=True)
    parser_add.add_argument('--owner', required=True)
    parser_update = subparsers.add_parser('update')
    parser_update.add_argument('--pk', required=True)
    parser_update.add_argument('--name')
    parser_update.add_argument('--birthday')
    parser_del = subparsers.add_parser('del')
    parser_del.add_argument('--pk', required=True)
    parser_list = subparsers.add_parser('list')
    parser_list.add_argument('--pk')
    args = parser.parse_args()
    # No subcommand given: show usage and fail.
    if args.subparser is None:
        parser.print_help()
        sys.exit(1)
    main(args)
|
{"/utils.py": ["/models.py"], "/users.py": ["/models.py", "/utils.py"], "/pets.py": ["/models.py", "/utils.py"]}
|
15,276
|
grubberr/dog_and_cats
|
refs/heads/master
|
/models.py
|
#!/Users/ant/dog_and_cats/env/bin/python3
from mongoengine import *
connect('mydb')
date_format = '%m/%d/%Y'
class User(Document):
    """A pet owner stored in MongoDB."""
    first_name = StringField(required=True)
    last_name = StringField(required=True)
    birthday = DateTimeField(required=True)
    def __str__(self):
        return "pk = %s, first_name: %s, last_name: %s, birthday: %s" % (
            self.pk,
            self.first_name,
            self.last_name,
            self.birthday.strftime(date_format))
class Pet(Document):
    """Base document for pets; Cat and Dog are stored as subclasses."""
    name = StringField(required=True)
    birthday = DateTimeField(required=True)
    owner = ReferenceField('User', required=True)
    # Lets Cat/Dog share this collection with a _cls discriminator.
    meta = {'allow_inheritance': True}
    def __str__(self):
        return "pk = %s, name: %s, birthday: %s, owner: %s" % (
            self.pk,
            self.name,
            self.birthday.strftime(date_format),
            self.owner.first_name)
class Cat(Pet):
    """Concrete pet kind; no extra fields."""
    pass
class Dog(Pet):
    """Concrete pet kind; no extra fields."""
    pass
|
{"/utils.py": ["/models.py"], "/users.py": ["/models.py", "/utils.py"], "/pets.py": ["/models.py", "/utils.py"]}
|
15,281
|
StackSentinel/stacksentinel-python
|
refs/heads/master
|
/tests/test_client.py
|
"""
Tests for Stack Sentinel client.
"""
import json
from StackSentinel import StackSentinelMiddleware, StackSentinelClient, StackSentinelError
import unittest
import sys
class TestStackSentinel(unittest.TestCase):
    """Unit tests for StackSentinelClient helpers.

    FIX: the deprecated TestCase aliases (failUnless, failUnlessEqual)
    were removed in Python 3.12; they are replaced by assertTrue /
    assertEqual, which behave identically.
    """
    def setUp(self):
        pass
    def test_serialize_object(self):
        """repr()-able objects serialize via repr; un-repr-able ones get a placeholder."""
        class RegularClass(object):
            pass
        class ObjectThatCantBeRepr(object):
            def __repr__(self):
                raise RuntimeError
        x = RegularClass()
        y = StackSentinelClient._serialize_object(x)
        if not (y.startswith('<') and 'RegularClass' in y):
            self.fail('Unexpected result from _serialize_object: %r' % y)
        x = ObjectThatCantBeRepr()
        y = StackSentinelClient._serialize_object(x)
        self.assertEqual(y, '<Cannot Be Serialized>')
    def test_get_sys(self):
        """Interpreter info must include the live sys.path."""
        client = StackSentinelClient('', '', 'unittest')
        sys_info = client._get_sys_info()
        self.assertEqual(sys.path, sys_info['path'])
    def test_get_machine_info(self):
        """Machine info must at least carry a hostname entry."""
        client = StackSentinelClient('', '', 'unittest')
        machine_info = client._get_machine_info()
        self.assertTrue('hostname' in machine_info)
    def test_generate_request(self):
        """The JSON payload must carry the return_feedback_urls flag."""
        client = StackSentinelClient('', '', 'unittest')
        (request, payload) = client._generate_request(
            environment='unitest',
            error_message='TEST ERROR MESSAGE',
            error_type='TEST ERROR TYPE',
            return_feedback_urls=True,
            state={
                'test': range(100)
            },
            tags=['cheese'],
            traceback=[]
        )
        payload_parsed = json.loads(payload)
        self.assertTrue('return_feedback_urls' in payload)
    def test_handle_exception(self):
        """dry_run=True must return the send_error kwargs as a dict."""
        client = StackSentinelClient('', '', 'unittest')
        try:
            x = 1 / 0
        except:
            send_error_args = client.handle_exception(dry_run=True)
            if not isinstance(send_error_args, dict):
                self.fail('Did not return dict from handle_exception with dry_run enabled.')
if __name__ == '__main__':
unittest.main()
|
{"/tests/test_client.py": ["/StackSentinel/__init__.py"]}
|
15,282
|
StackSentinel/stacksentinel-python
|
refs/heads/master
|
/StackSentinel/__init__.py
|
"""
StackSentinel Python Client
===========================
Use this client to integrate StackSentinel (www.stacksentinel.com) into your Python projects. You can also use
platform-specific StackSentinel clients, such as the stacksentinel-flask client:
>>> import StackSentinel
>>> stack_sentinel_client = StackSentinel.StackSentinelClient(
... account_token='-- YOUR ACCOUNT TOKEN --',
... project_token='-- YOUR PROJECT TOKEN --',
... environment='development-experiment', tags=['documentation-test'])
>>> print stack_sentinel_client
<StackSentinel.StackSentinelClient object at 0x10bcfbb90>
>>> try:
... oops = 1 / 0
... except:
... stack_sentinel_client.handle_exception()
...
That's all it takes. The information about the exception, along with platform and machine information, is gathered
up and sent to Stack Sentinel.
For WSGI applications, you can use the WSGI Middleware included with this project:
>>> app = StackSentinelMiddleware(app, stack_sentinel_client)
Compatibility
=============
This StackSentinel Python Client is compatible with Python 2.7 and 3.x and Stack Sentinel API v1.
License
=======
Copyright 2015 Stack Sentinel. All Rights Reserved.
This software is licensed under the Apache License, version 2.0.
See LICENSE for full details.
Getting Help
============
Email support@stacksentinel.com with your questions.
"""
import json
import os
import sys
#
# Some sandboxed environments do not have socket
try:
import socket
except:
socket = None
#
# Some sandboxed environments do not have platform
try:
import platform
except:
platform = None
#
# Python2/3
try:
from urllib2 import urlopen, Request, HTTPError
except ImportError:
from urllib.request import urlopen, Request, HTTPError
class StackSentinelError(ValueError):
    """Raised when a Stack Sentinel backend request cannot be built or the
    backend reports an error."""
class StackSentinelClient(object):
"""
Client to send exceptions to StackSentinel. See in particular the handle_exception method, which can be called
within an except block. See also the send_error method, which at a lower level generates an appropriate payload
for the StackSentinel API.
"""
USER_AGENT = 'STACK SENTINEL PYTHON CLIENT'
def __init__(self, account_token, project_token, environment, tags=None,
endpoint="https://api.stacksentinel.com/api/v1/insert"):
"""
:param account_token: Your account token, as supplied by StackSentinel
:param project_token: Your project token, as supplied by StackSentinel
:param environment: The environment of the project (eg, "production", "devel", etc)
:param tags: Any tags you want associated with *all* errors sent using this client.
:param endpoint: API endpoint. Defaults to StackSentinel backend.
"""
self.account_token = account_token
self.project_token = project_token
self.endpoint = endpoint
self.environment = environment
if tags:
self.tags = tags
else:
self.tags = []
@staticmethod
def _serialize_object(obj):
"""
When the state of an exception includes something that we can't pickle, show something useful instead.
"""
try:
return repr(obj)
except:
return '<Cannot Be Serialized>'
def handle_exception(self, exc_info=None, state=None, tags=None, return_feedback_urls=False,
dry_run=False):
"""
Call this method from within a try/except clause to generate a call to Stack Sentinel.
:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself
:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.
:param tags: Any string tags you want associated with the exception report.
:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.
:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests.
"""
if not exc_info:
exc_info = sys.exc_info()
if exc_info is None:
raise StackSentinelError("handle_exception called outside of exception handler")
(etype, value, tb) = exc_info
try:
msg = value.args[0]
except:
msg = repr(value)
if not isinstance(tags, list):
tags = [tags]
limit = None
new_tb = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
tb = tb.tb_next
n = n + 1
new_tb.append({'line': lineno, 'module': filename, 'method': name})
if state is None:
state = {}
if 'sys' not in state:
try:
state['sys'] = self._get_sys_info()
except Exception as e:
state['sys'] = '<Unable to get sys: %r>' % e
if 'machine' not in state:
try:
state['machine'] = self._get_machine_info()
except Exception as e:
state['machine'] = '<Unable to get machine: %e>' % e
if tags is None:
tags = []
# The joy of Unicode
if sys.version_info.major > 2:
error_type = str(etype.__name__)
error_message = str(value)
else:
error_type = unicode(etype.__name__)
error_message = unicode(value)
send_error_args = dict(error_type=error_type,
error_message=error_message,
traceback=new_tb,
environment=self.environment,
state=state,
tags=self.tags + tags,
return_feedback_urls=return_feedback_urls)
if dry_run:
return send_error_args
else:
return self.send_error(**send_error_args)
def _get_sys_info(self):
sys_info = {
'version': sys.version,
'version_info': sys.version_info,
'path': sys.path,
'platform': sys.platform
}
return sys_info
def _get_machine_info(self):
machine = {}
if socket:
try:
machine['hostname'] = socket.gethostname()
except Exception as e:
machine['hostname'] = '<Could not determine: %r>' % (e,)
else:
machine['hostname'] = "<socket module not available>"
machine['environ'] = dict(os.environ)
if platform:
machine['platform'] = platform.uname()
machine['node'] = platform.node()
machine['libc_ver'] = platform.libc_ver()
machine['version'] = platform.version()
machine['dist'] = platform.dist()
return machine
def send_error(self, error_type, error_message, traceback, environment, state, tags=None,
               return_feedback_urls=False):
    """
    Sends error payload to Stack Sentinel API, returning a parsed JSON response. (Parsed as in,
    converted into Python dict/list objects)
    :param error_type: Type of error generated. (Eg, "TypeError")
    :param error_message: Message of error generated (Eg, "cannot concatenate 'str' and 'int' objects")
    :param traceback: List of dictionaries. Each dictionary should contain, "line", "method", and "module" keys.
    :param environment: Environment the error occurred in (eg, "devel")
    :param state: State of the application when the error happened. Could contain form data, cookies, etc.
    :param tags: Arbitrary tags you want associated with the error. list.
    :param return_feedback_urls: If True, return payload will offer URLs to send users to collect additional feedback for debugging.
    :return: Parsed return value from Stack Sentinel API
    :raises StackSentinelError: when the API rejects the payload (HTTP 400).
    """
    (request, payload) = self._generate_request(environment, error_message, error_type, return_feedback_urls,
                                                state, tags, traceback)
    try:
        response = urlopen(request)
    except HTTPError as e:
        # A 400 carries an API-provided explanation; anything else is
        # re-raised untouched.
        if e.code == 400:
            raise StackSentinelError(e.read())
        else:
            raise
    if sys.version_info.major > 2:
        text_response = response.read().decode(response.headers.get_content_charset() or 'utf8')
    else:
        # BUG FIX: the two decode branches were inverted -- a declared
        # charset was ignored (decode('utf8', 'replace')) and, with no
        # charset, the whole content-type string was used as the codec name.
        content_type = response.headers.get('content-type', '')
        encoding = content_type.split('charset=')[-1].strip() if 'charset=' in content_type else ''
        if encoding:
            text_response = response.read().decode(encoding)
        else:
            text_response = response.read().decode('utf8', 'replace')
    return json.loads(text_response)
def _generate_request(self, environment, error_message, error_type, return_feedback_urls, state, tags, traceback):
payload = json.dumps(dict(
account_token=self.account_token,
project_token=self.project_token,
return_feedback_urls=return_feedback_urls,
errors=[dict(
error_type=error_type,
error_message=error_message,
environment=environment,
traceback=traceback,
state=state,
tags=tags or []
)]
), default=self._serialize_object)
request = Request(self.endpoint, data=payload.encode('utf8'), headers={
'Accept-Charset': 'utf-8',
"Content-Type": "application/x-www-form-urlencoded ; charset=UTF-8",
'User-Agent': self.USER_AGENT})
return (request, payload)
class StackSentinelMiddleware(object):
    """WSGI middleware that reports unhandled exceptions to Stack Sentinel.

    Usage:
    >>> client = StackSentinelClient(...)
    >>> app = StackSentinelMiddleware(app, client)
    """

    def __init__(self, app, client):
        """
        :param app: WSGI application object
        :param client: Instance of StackSentinel
        """
        self.app = app
        self.client = client

    def __call__(self, environ, start_response):
        # NOTE: because this method contains `yield`, it is a generator; the
        # wrapped app only runs once the server starts iterating the response.
        result = None
        try:
            try:
                result = self.app(environ, start_response)
            except Exception:
                self.client.handle_exception(state={'wsgi_environ': environ})
                raise
            try:
                if result is not None:
                    for chunk in result:
                        yield chunk
            except Exception:
                self.client.handle_exception(state={'wsgi_environ': environ})
                raise
        finally:
            # Honour the WSGI contract: close the wrapped iterable if it
            # supports it (result stays None when the app itself raised).
            if hasattr(result, 'close'):
                result.close()
|
{"/tests/test_client.py": ["/StackSentinel/__init__.py"]}
|
15,283
|
StackSentinel/stacksentinel-python
|
refs/heads/master
|
/setup.py
|
from setuptools import setup, find_packages
version = '1.2'
url = 'https://github.com/StackSentinel/stacksentinel-python'

setup(
    name='stacksentinel',
    description='Stack Sentinel client and WSGI middleware',
    keywords='stack sentinel stacksentinel exception tracking api',
    version=version,
    author="Jeri MgCuckin",
    author_email="jerymcguckin@stacksentinel.com",
    url=url,
    test_suite='tests',
    packages=find_packages(exclude=['tests']),
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        # BUG FIX: a missing comma after the next entry used to merge it with
        # 'Operating System :: OS Independent' via implicit string
        # concatenation, yielding one invalid classifier instead of two.
        "Programming Language :: Python :: 3.4",
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Intended Audience :: Developers',
    ],
    license='Apache License (2.0)'
)
|
{"/tests/test_client.py": ["/StackSentinel/__init__.py"]}
|
15,286
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/resources/vendedor.py
|
from flask.helpers import make_response
from flask_restful import Resource, reqparse
from flask import render_template,request
from models.vendedor import VendedorModel
from werkzeug.security import safe_str_cmp
import traceback
import os
# Parser for the full seller payload, used by registration and update routes.
atributos = reqparse.RequestParser()
atributos.add_argument('nome_vendedor', type=str, required=True, help="Ei! o seu 'nome' é obrigatório!")
atributos.add_argument('telefone_vendedor', type=str, required=True, help="Ei! o seu 'telefone' é obrigatório!")
atributos.add_argument('cnpj_vendedor', type=str, required=True, help="Ei! o seu 'cnpj' é obrigatório!")
atributos.add_argument('email_vendedor', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos.add_argument('senha_vendedor', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
atributos.add_argument('endereco_vendedor', type=str, required=True, help="Ei! o seu 'endereço' é obrigatório!")
atributos.add_argument('numero_end_vendedor', type=str, required=True, help="Ei! o seu 'número de endereço' é obrigatório!")
atributos.add_argument('complemento_vendedor', type=str, required=False)
atributos.add_argument('bairro_vendedor', type=str, required=True, help="Ei! o seu 'bairro' é obrigatório!")
atributos.add_argument('cep_vendedor', type=str, required=True, help="Ei! o seu 'cep' é obrigatório!")
atributos.add_argument('ativado', type=bool)

# Parser for the login route, which only needs the credentials.
atributos_login = reqparse.RequestParser()
atributos_login.add_argument('email_vendedor', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos_login.add_argument('senha_vendedor', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
# BUG FIX: a second `atributos.add_argument('ativado', type=bool)` followed
# here, re-registering an argument already declared above; the duplicate was
# dropped. If it was meant for `atributos_login`, confirm before re-adding.
class Vendedores(Resource):
    """CRUD endpoints for a single seller, addressed by id."""

    def get(self, id_vendedor):
        """Return the seller as JSON, or a 404 page when absent."""
        vendedor = VendedorModel.achar_vendedor(id_vendedor)
        if not vendedor:
            return make_response(render_template("home.html", message= "Vendedor não encontrado."), 404)
        return vendedor.json()

    def put(self, id_vendedor):
        """Update an existing seller, or create one when the id is unknown."""
        dados = atributos.parse_args()
        vendedor = VendedorModel.achar_vendedor(id_vendedor)
        if vendedor:
            vendedor.atualizar_vendedor(**dados)
            vendedor.salvar_vendedor()
            return {"message": "Vendedor atualizado com sucesso!"}, 200
        novo = VendedorModel(**dados)
        novo.salvar_vendedor()
        return make_response(render_template("home.html", message="Vendedor criado com sucesso!"), 201)

    def delete(self, id_vendedor):
        """Delete the seller when found; otherwise render a 404 page."""
        vendedor = VendedorModel.achar_vendedor(id_vendedor)
        if not vendedor:
            return make_response(render_template("home.html", message="Vendedor não encontrado."), 404)
        vendedor.deletar_vendedor()
        return {"message": "Vendedor deletado com sucesso!"}, 200
class VendedorRegistro(Resource):
    """Creates a seller account and sends the confirmation email."""

    def post(self):
        dados = atributos.parse_args()
        if VendedorModel.achar_por_login(dados['email_vendedor']):
            # NOTE(review): this duplicate-login reply defaults to HTTP 200;
            # ProdutosRegistro uses 409 for the same case -- confirm intent.
            return {"message": "O seu login '{}' já existe".format(dados['email_vendedor'])}
        vendedor = VendedorModel(**dados)
        vendedor.ativado = False  # activation happens via the email link
        try:
            vendedor.salvar_vendedor()
            vendedor.enviar_email_confirmacao_vendedor()
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the roll-back behaviour
        # without masking interpreter-level signals.
        except Exception:
            vendedor.deletar_vendedor()
            traceback.print_exc()
            return make_response(render_template("cadastro_vendedores.html", message='Erro interno de servidor'), 500)
        return make_response(render_template("login_vendedor.html", message="Sucesso! Cadastro pendente de confirmação via email"), 201)
class VendedorLogin(Resource):
    # Authenticates a seller and, on success, stores the credentials in cookies.
    @classmethod
    def post(cls):
        dados = atributos_login.parse_args()
        user = VendedorModel.achar_por_login(dados['email_vendedor'])
        # SECURITY NOTE(review): passwords appear to be stored and compared in
        # plain text (safe_str_cmp on the raw value) -- consider hashing.
        if user and safe_str_cmp(user.senha_vendedor, dados['senha_vendedor']):
            if user.ativado:
                r = make_response(render_template("home_login.html", message='Login realizado com sucesso!'), 200)
                # SECURITY NOTE(review): the raw login and password are written
                # to cookies; anything with cookie access can read them.
                r.set_cookie("login", dados["email_vendedor"], samesite="Strict")
                r.set_cookie("senha", dados["senha_vendedor"], samesite="Strict")
                # r.set_cookie("ativado", dados["ativado"], samesite="Strict")
                return r
            # Account exists but the email was never confirmed.
            return make_response(render_template("login.html", message='Usuário não confirmado, por favor verifique seu e-mail'), 400)
        return make_response(render_template("login.html", message='Usuário ou senha incorretos.'), 401)
class VendedorLogout(Resource):
    """Clears the seller credential cookies."""

    def post(self):
        resposta = make_response(render_template("cadastro_vendedores.html", message="Deslogou com sucesso!"))
        for nome in ("email_vendedor", "senha_vendedor"):
            resposta.set_cookie(nome, "")
        return resposta
class VendedorConfirmado(Resource):
    """Email-confirmation callback: flips the seller's `ativado` flag."""

    @classmethod
    def get(cls, id_vendedor):
        vendedor = VendedorModel.achar_vendedor(id_vendedor)
        if not vendedor:
            return {'message': 'Vendedor não encontrado'}, 404
        vendedor.ativado = True
        vendedor.salvar_vendedor()
        return make_response(render_template('usuario_confirmado.html'), 200, {'Content-Type': 'text/html'})
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,287
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/resources/produtos.py
|
from flask_restful import Resource, reqparse
from flask import render_template,request
from models.produtos import ProdutosModel
from flask.helpers import make_response
from werkzeug.security import safe_str_cmp
import random
import sqlite3
import os
# import traceback
# Request parser for product payloads; only the description is optional.
atributos = reqparse.RequestParser()
atributos.add_argument('nome_produto', type=str, required=True, help="Ei! o 'nome' é obrigatório!")
atributos.add_argument('codigo_produto', type=str, required=True, help="Ei! o 'Codigo' é obrigatório!")
atributos.add_argument('descricao_produto', type=str, required=False)
atributos.add_argument('preco_produto', type=float, required=True, help="Ei! o 'preço' é obrigatório!")
atributos.add_argument('tipo_produto', type=str, required=True, help="Ei! o 'tipo' é obrigatório!")
atributos.add_argument('filtro_produto', type=str, required=True, help="Ei! o 'filtro' é obrigatório!")
# NOTE(review): quantidade_produto is parsed as str -- confirm whether it
# should be numeric.
atributos.add_argument('quantidade_produto', type=str, required=True, help="Ei! a 'quantidade' é obrigatória!")
class Produto(Resource):
    """Update/delete endpoints for a single product, addressed by id."""

    def put(self, id_produto):
        """Update the product, or create it when the id is unknown."""
        dados = atributos.parse_args()
        existente = ProdutosModel.listar_por_id(id_produto)
        if existente:
            existente.atualizar_produto(**dados)
            existente.salvar_produto()
            return make_response(render_template("home.html", message="Produto atualizado com sucesso!"), 200)
        novo = ProdutosModel(**dados)
        novo.salvar_produto()
        return make_response(render_template("home.html", message="Produto criado com sucesso!"), 201)

    def delete(self, id_produto):
        """Delete the product when found; otherwise render a 404 page."""
        existente = ProdutosModel.listar_por_id(id_produto)
        if not existente:
            return make_response(render_template("home.html", message="Produto não encontrado!"), 404)
        existente.deletar_produto()
        return make_response(render_template("home.html", message="Produto deletado com sucesso!"), 200)
class ProdutosRegistro(Resource):
    """Registers a new product, rejecting duplicate product codes."""

    def post(self):
        dados = atributos.parse_args()
        if ProdutosModel.achar_por_codigo(dados['codigo_produto']):
            return make_response(render_template("home.html", message="Você já cadastrou esse produto!"), 409)
        novo = ProdutosModel(**dados)
        novo.salvar_produto()
        return make_response(render_template("home.html", message="Produto cadastrado com sucesso!"), 201)
class ProdutosBuscaSimples(Resource):
    """Lists every product as JSON via the SQLAlchemy model."""

    def get(self):
        todos = ProdutosModel.query.all()
        return {'produto': [p.json() for p in todos]}
def listar_produtos():
    """Fetch every product row straight from SQLite as a list of dicts.

    Returns a list of {'nome_produto', 'preco_produto', 'descricao_produto',
    'tipo_produto', 'filtro_produto'} dicts, one per row.

    BUG FIX: the connection was never closed and leaked on every call; it is
    now released in a finally block.
    """
    colunas = ('nome_produto', 'preco_produto', 'descricao_produto', 'tipo_produto', 'filtro_produto')
    banco = sqlite3.connect("banco.db")
    try:
        cursor = banco.cursor()
        cursor.execute("SELECT nome_produto,preco_produto,descricao_produto, tipo_produto, filtro_produto FROM produto")
        resultado = cursor.fetchall()
    finally:
        banco.close()
    return [dict(zip(colunas, linha)) for linha in resultado]
def extensao_arquivo(filename):
    """Return the lowercase extension of *filename*, or '' when it has none."""
    _, ponto, extensao = filename.rpartition('.')
    return extensao.lower() if ponto else ''
def salvar_arquivo_upload():
    """Persist an uploaded 'foto' to fotos_produtos/ under a fresh uuid name.

    Returns the stored file name, or '' when no valid image was uploaded.
    """
    import uuid
    if "foto" not in request.files:
        return ""
    foto = request.files["foto"]
    extensao = extensao_arquivo(foto.filename)
    # Only image extensions are accepted.
    if extensao not in ('jpg', 'jpeg', 'png', 'gif', 'svg', 'webp'):
        return ""
    nome = f"{uuid.uuid1()}.{extensao}"
    foto.save(os.path.join("fotos_produtos", nome))
    return nome
"""
class ProdutosBuscaFiltro(Resource):
def get(self, filtro_produto):
produtos = ProdutosModel.listar_produtos_especifico(filtro_produto)
if produtos:
return produtos.json()
return make_response(render_template("home.html", message="Produto não encontrado."), 404)
"""
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,288
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/resources/entregador.py
|
from flask_restful import Resource, reqparse
from flask import render_template
from models.entregador import EntregadorModel
from flask.helpers import make_response
from werkzeug.security import safe_str_cmp
import traceback
# Parser for the full courier payload, used by registration and update routes.
atributos = reqparse.RequestParser()
atributos.add_argument('nome_entregador', type=str, required=True, help="Ei! o seu 'nome' é obrigatório!")
atributos.add_argument('cpf_entregador', type=str, required=True, help="Ei! o seu 'cpf' é obrigatório!")
atributos.add_argument('telefone_entregador', type=str, required=True, help="Ei! o seu 'telefone' é obrigatório!")
atributos.add_argument('cnh_entregador', type=str, required=True, help="Ei! a sua 'cnh' é obrigatória!")
atributos.add_argument('email_entregador', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos.add_argument('senha_entregador', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
atributos.add_argument('endereco_entregador', type=str, required=True, help="Ei! o seu 'endereço' é obrigatório!")
atributos.add_argument('numero_end_entregador', type=str, required=True, help="Ei! o seu 'número de endereço' é obrigatório!")
atributos.add_argument('complemento_entregador', type=str, required=False)
atributos.add_argument('bairro_entregador', type=str, required=True, help="Ei! o seu 'bairro' é obrigatório!")
atributos.add_argument('cep_entregador', type=str, required=True, help="Ei! o seu 'cep' é obrigatório!")
atributos.add_argument('ativado', type=bool)

# Parser for the login route, which only needs the credentials.
atributos_login = reqparse.RequestParser()
atributos_login.add_argument('email_entregador', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos_login.add_argument('senha_entregador', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
# BUG FIX: a second `atributos.add_argument('ativado', type=bool)` followed
# here, re-registering an argument already declared above; the duplicate was
# dropped. If it was meant for `atributos_login`, confirm before re-adding.
class Entregadores(Resource):
    """CRUD endpoints for a single courier, addressed by id."""

    def get(self, id_entregador):
        """Return the courier as JSON, or a 404 page when absent."""
        entregador = EntregadorModel.achar_entregador(id_entregador)
        if not entregador:
            return make_response(render_template("home.html", message="Entregador não encontrado."), 404)
        return entregador.json()

    def put(self, id_entregador):
        """Update an existing courier, or create one when the id is unknown."""
        dados = atributos.parse_args()
        entregador = EntregadorModel.achar_entregador(id_entregador)
        if entregador:
            entregador.atualizar_entregador(**dados)
            entregador.salvar_entregador()
            return {"message": "Entregador atualizado com sucesso!"}, 200
        novo = EntregadorModel(**dados)
        novo.salvar_entregador()
        return make_response(render_template("home.html", message="Entregador criado com sucesso!"), 201)

    def delete(self, id_entregador):
        """Delete the courier when found; otherwise render a 404 page."""
        entregador = EntregadorModel.achar_entregador(id_entregador)
        if not entregador:
            return make_response(render_template("home.html", message='Entregador não encontrado!'), 404)
        entregador.deletar_entregador()
        return make_response(render_template("home.html", message='Entregador deletado com sucesso!'), 200)
class EntregadorRegistro(Resource):
    """Creates a courier account and sends the confirmation email."""

    def post(self):
        dados = atributos.parse_args()
        if EntregadorModel.achar_por_login(dados['email_entregador']):
            # NOTE(review): this duplicate-login reply defaults to HTTP 200 --
            # confirm whether a 409 was intended.
            return {"message": "O seu login '{}' já existe".format(dados['email_entregador'])}
        entregador = EntregadorModel(**dados)
        entregador.ativado = False  # activation happens via the email link
        try:
            entregador.salvar_entregador()
            entregador.enviar_confirmacao_email_entregador()
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the roll-back behaviour
        # without masking interpreter-level signals.
        except Exception:
            entregador.deletar_entregador()
            traceback.print_exc()
            return make_response(render_template("cadastro_entregador.html", message='Erro interno de servidor'), 500)
        return make_response(render_template("login.html", message="Sucesso! Cadastro pendente de confirmação via email"), 201)
class EntregadorLogin(Resource):
    # Authenticates a courier; unlike VendedorLogin, no cookies are set here.
    @classmethod
    def post(cls):
        dados = atributos_login.parse_args()
        entregador = EntregadorModel.achar_por_login(dados['email_entregador'])
        # SECURITY NOTE(review): passwords appear to be stored and compared in
        # plain text (safe_str_cmp on the raw value) -- consider hashing.
        if entregador and safe_str_cmp(entregador.senha_entregador, dados['senha_entregador']):
            if entregador.ativado:
                return make_response(render_template("home.html", message='Login realizado com sucesso!'), 200)
            # Account exists but the email was never confirmed.
            return make_response(render_template("login.html", message='Usuário não confirmado'), 400)
        return make_response(render_template("login.html", message='Usuário ou senha incorretos.'), 401)
class EntregadorLogout(Resource):
    """Clears the courier credential cookies."""

    def post(self):
        resposta = make_response(render_template("cadastro_entregador.html", message="Deslogou com sucesso!"))
        for nome in ("email_entregador", "senha_entregador"):
            resposta.set_cookie(nome, "")
        return resposta
class EntregadorConfirmado(Resource):
    """Email-confirmation callback: flips the courier's `ativado` flag."""

    @classmethod
    def get(cls, id_entregador):
        entregador = EntregadorModel.achar_entregador(id_entregador)
        if not entregador:
            return {'message': 'Usuário não encontrado'}, 404
        entregador.ativado = True
        entregador.salvar_entregador()
        return make_response(render_template('usuario_confirmado.html'), 200, {'Content-Type': 'text/html'})
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,289
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/models/usuario.py
|
from operator import imod
from flask.helpers import url_for
from sql_alchemy import banco
from flask import request
from requests import post
import mailgun
domain = mailgun.MAILGUN_DOMAIN
api_key = mailgun.MAILGUN_API_KEY
from_title = mailgun.FROM_TITLE
from_email = mailgun.FROM_EMAIL
class UsuarioModel(banco.Model):
    """SQLAlchemy model for a regular user account."""

    __tablename__ = 'usuarios'
    id_usuario = banco.Column(banco.Integer, primary_key=True)
    nome_usuario = banco.Column(banco.String(50))
    telefone_usuario = banco.Column(banco.String(11))
    cpf_usuario = banco.Column(banco.String(11))
    email_usuario = banco.Column(banco.String(50), nullable=False, unique=True)
    senha_usuario = banco.Column(banco.String(30), nullable=False)
    endereco_usuario = banco.Column(banco.String(40))
    numero_end_usuario = banco.Column(banco.String(10))
    complemento_usuario = banco.Column(banco.String(30))
    bairro_usuario = banco.Column(banco.String(20))
    cep_usuario = banco.Column(banco.String(8))
    ativado = banco.Column(banco.Boolean, default=False)

    def __init__(self, nome_usuario, telefone_usuario, cpf_usuario, email_usuario, senha_usuario, endereco_usuario, numero_end_usuario, complemento_usuario, bairro_usuario, cep_usuario, ativado):
        self.nome_usuario = nome_usuario
        self.telefone_usuario = telefone_usuario
        self.cpf_usuario = cpf_usuario
        self.email_usuario = email_usuario
        self.senha_usuario = senha_usuario
        self.endereco_usuario = endereco_usuario
        self.numero_end_usuario = numero_end_usuario
        self.complemento_usuario = complemento_usuario
        self.bairro_usuario = bairro_usuario
        self.cep_usuario = cep_usuario
        self.ativado = ativado

    def enviar_confirmacao_email(self):
        """Send the account-confirmation email through the Mailgun API."""
        link = request.url_root[:-1] + url_for('usuarioconfirmado',id_usuario=self.id_usuario)
        return post('https://api.mailgun.net/v3/{}/messages'.format(domain),
                    auth=('api', api_key),
                    data={'from': '{} <{}>'.format(from_title, from_email),
                          'to': self.email_usuario,
                          'subject': 'Confirmação de Cadastro',
                          'text': 'Confirme seu cadastro clicando no link a seguir: {}'.format(link),
                          'html': '<html><p>\
Confirme seu cadastro clicando no link a seguir:<a href="{}">CONFIRMAR EMAIL</a>\
</p><html>'.format(link)
                          })

    def json(self):
        """Serialize the public fields (the password is deliberately omitted)."""
        return {
            'nome_usuario': self.nome_usuario,
            'telefone_usuario': self.telefone_usuario,
            'cpf_usuario': self.cpf_usuario,
            'email_usuario': self.email_usuario,
            'endereco_usuario': self.endereco_usuario,
            'numero_end_usuario': self.numero_end_usuario,
            'complemento_usuario': self.complemento_usuario,
            'bairro_usuario': self.bairro_usuario,
            'cep_usuario': self.cep_usuario,
            'ativado': self.ativado
        }

    @classmethod
    def achar_usuario(cls, id_usuario):
        """Return the user with this id, or None."""
        # SELECT * FROM usuarios WHERE id_usuario = $id_usuario
        return cls.query.filter_by(id_usuario=id_usuario).first()

    @classmethod
    def achar_por_login(cls, email_usuario):
        """Return the user with this e-mail, or None."""
        return cls.query.filter_by(email_usuario=email_usuario).first()

    def salvar_usuario(self):
        banco.session.add(self)
        banco.session.commit()

    def atualizar_usuario(self, nome_usuario, telefone_usuario, cpf_usuario, email_usuario, senha_usuario, endereco_usuario, numero_end_usuario, complemento_usuario, bairro_usuario, cep_usuario, ativado):
        """Overwrite every field with the supplied values (not committed here).

        BUG FIX: email, senha and ativado previously used `==` (a no-op
        comparison) instead of `=`, so those three fields were never updated.
        """
        self.nome_usuario = nome_usuario
        self.telefone_usuario = telefone_usuario
        self.cpf_usuario = cpf_usuario
        self.email_usuario = email_usuario
        self.senha_usuario = senha_usuario
        self.endereco_usuario = endereco_usuario
        self.numero_end_usuario = numero_end_usuario
        self.complemento_usuario = complemento_usuario
        self.bairro_usuario = bairro_usuario
        self.cep_usuario = cep_usuario
        self.ativado = ativado

    def deletar_usuario(self):
        banco.session.delete(self)
        banco.session.commit()
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,290
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/models/vendedor.py
|
from sql_alchemy import banco
from flask import url_for
from flask import request
from requests import post
import mailgun
domain = mailgun.MAILGUN_DOMAIN
site_key = mailgun.MAILGUN_API_KEY
from_title = mailgun.FROM_TITLE
from_email = mailgun.FROM_EMAIL
class VendedorModel(banco.Model):
    """SQLAlchemy model for a seller account."""

    __tablename__ = 'vendedores'
    id_vendedor = banco.Column(banco.Integer, primary_key=True)
    nome_vendedor = banco.Column(banco.String(50))
    telefone_vendedor = banco.Column(banco.String(11))
    cnpj_vendedor = banco.Column(banco.String(14))
    email_vendedor = banco.Column(banco.String(50))
    senha_vendedor = banco.Column(banco.String(30))
    endereco_vendedor = banco.Column(banco.String(40))
    numero_end_vendedor = banco.Column(banco.String(10))
    complemento_vendedor = banco.Column(banco.String(30))
    bairro_vendedor = banco.Column(banco.String(20))
    cep_vendedor = banco.Column(banco.String(8))
    ativado = banco.Column(banco.Boolean, default=False)

    def __init__(self, nome_vendedor, telefone_vendedor, cnpj_vendedor, email_vendedor, senha_vendedor, endereco_vendedor, numero_end_vendedor, complemento_vendedor, bairro_vendedor, cep_vendedor, ativado):
        self.nome_vendedor = nome_vendedor
        self.telefone_vendedor = telefone_vendedor
        self.cnpj_vendedor = cnpj_vendedor
        self.email_vendedor = email_vendedor
        self.senha_vendedor = senha_vendedor
        self.endereco_vendedor = endereco_vendedor
        self.numero_end_vendedor = numero_end_vendedor
        self.complemento_vendedor = complemento_vendedor
        self.bairro_vendedor = bairro_vendedor
        self.cep_vendedor = cep_vendedor
        self.ativado = ativado

    def enviar_email_confirmacao_vendedor(self):
        """Send the account-confirmation email through the Mailgun API."""
        #http://127.0.0.1:5000/confirmacao_vendedor/
        link_vendedor = request.url_root[:-1] + url_for('vendedorconfirmado', id_vendedor=self.id_vendedor)
        return post('https://api.mailgun.net/v3/{}/messages'.format(domain),
                    auth=('api', site_key),
                    data={'from': '{} <{}>'.format(from_title, from_email),
                          'to': self.email_vendedor,
                          'subject': 'Confirmação de Cadastro',
                          'text': 'Confirme seu cadastro clicando no link a seguir: {}'.format(link_vendedor),
                          'html': '<html><p>\
Confirme seu cadastro clicando no link a seguir:<a href="{}">CONFIRMAR EMAIL</a>\
</p><html>'.format(link_vendedor)
                          })

    def json(self):
        """Serialize the public fields (the password is deliberately omitted)."""
        return {
            'nome_vendedor': self.nome_vendedor,
            'telefone_vendedor': self.telefone_vendedor,
            'cnpj_vendedor': self.cnpj_vendedor,
            'email_vendedor': self.email_vendedor,
            'endereco_vendedor': self.endereco_vendedor,
            'numero_end_vendedor': self.numero_end_vendedor,
            'complemento_vendedor': self.complemento_vendedor,
            'bairro_vendedor': self.bairro_vendedor,
            'cep_vendedor': self.cep_vendedor,
            'ativado': self.ativado
        }

    @classmethod
    def achar_vendedor(cls, id_vendedor):
        """Return the seller with this id, or None."""
        # SELECT * FROM vendedores WHERE id_vendedor = $id_vendedor
        return cls.query.filter_by(id_vendedor=id_vendedor).first()

    @classmethod
    def achar_por_login(cls, email_vendedor):
        """Return the seller with this e-mail, or None."""
        return cls.query.filter_by(email_vendedor=email_vendedor).first()

    def salvar_vendedor(self):
        banco.session.add(self)
        banco.session.commit()

    def atualizar_vendedor(self, nome_vendedor, telefone_vendedor, cnpj_vendedor, email_vendedor, senha_vendedor, endereco_vendedor, numero_end_vendedor, complemento_vendedor, bairro_vendedor, cep_vendedor, ativado):
        """Overwrite every field with the supplied values (not committed here).

        BUG FIX: email, senha and ativado previously used `==` (a no-op
        comparison) instead of `=`, so those three fields were never updated.
        """
        self.nome_vendedor = nome_vendedor
        self.telefone_vendedor = telefone_vendedor
        self.cnpj_vendedor = cnpj_vendedor
        self.email_vendedor = email_vendedor
        self.senha_vendedor = senha_vendedor
        self.endereco_vendedor = endereco_vendedor
        self.numero_end_vendedor = numero_end_vendedor
        self.complemento_vendedor = complemento_vendedor
        self.bairro_vendedor = bairro_vendedor
        self.cep_vendedor = cep_vendedor
        self.ativado = ativado

    def deletar_vendedor(self):
        banco.session.delete(self)
        banco.session.commit()
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,291
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/app.py
|
from werkzeug.exceptions import PreconditionRequired
from models.entregador import EntregadorModel
from models.vendedor import VendedorModel
from flask import Flask, render_template, request, flash, redirect, url_for, send_from_directory
from flask_restful import Api
from models.usuario import UsuarioModel
from resources.vendedor import Vendedores, VendedorConfirmado, VendedorRegistro, VendedorLogin, VendedorLogout
from resources.usuario import UsuarioConfirmado, Usuarios, UsuarioRegistro, UsuarioLogin, UsuarioLogout
from resources.entregador import Entregadores, EntregadorConfirmado, EntregadorRegistro, EntregadorLogin, EntregadorLogout
from resources.produtos import Produto, ProdutosRegistro, ProdutosBuscaSimples, listar_produtos
from werkzeug.security import safe_str_cmp
import secretkeys
import requests
import json
import werkzeug
# Flask application wired to a local SQLite database through SQLAlchemy.
app = Flask(__name__, template_folder='templates')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence event-system overhead warning
api = Api(app)
def _verifica_recaptcha(secret, captcha_response):
    """Ask Google's siteverify endpoint whether a captcha response is valid.

    Shared helper: the three is_human_* functions below were identical except
    for the secret key, so the request/parse logic lives here once.
    """
    payload = {'response': captcha_response, 'secret': secret}
    response = requests.post("https://www.google.com/recaptcha/api/siteverify", payload)
    return json.loads(response.text)['success']

def is_human_vendedor(captcha_response):
    """True when the seller-form captcha passed."""
    return _verifica_recaptcha(secretkeys.SECRET_KEY_VENDEDOR, captcha_response)

def is_human_usuario(captcha_response):
    """True when the user-form captcha passed."""
    return _verifica_recaptcha(secretkeys.SECRET_KEY_USUARIO, captcha_response)

def is_human_entregador(captcha_response):
    """True when the courier-form captcha passed."""
    return _verifica_recaptcha(secretkeys.SECRET_KEY_ENTREGADOR, captcha_response)
@app.before_first_request
def cria_banco():
    """Create all tables before the first request is served.

    BUG FIX: `banco` was never imported in this module, so this handler
    raised NameError on the first request. The models obtain it from
    sql_alchemy, so import it locally here.
    """
    from sql_alchemy import banco
    banco.create_all()
@app.route('/home')
def home():
    """Serve the public home page."""
    pagina = render_template('home.html')
    return pagina
@app.route('/home_login')
def main_home():
    """Serve the post-login home page.

    NOTE(review): the login_ok_vend auth guard is currently commented out.
    """
    return render_template('home_login.html')
@app.route('/')
def login():
    """Serve the login page at the site root."""
    pagina = render_template('login.html')
    return pagina
@app.route("/login_vendedor")
def login_vendedor():
    """Serve the seller login page."""
    pagina = render_template('login_vendedor.html')
    return pagina
@app.route('/cadastros')
def cadastros():
    """Serve the registration hub page."""
    pagina = render_template('cadastros.html')
    return pagina
@app.route('/cadastro_vendedor', methods=["GET", "POST"])
def vendedor():
    """Seller registration form: validates the captcha and builds the model.

    Bug fixes: the route did not declare POST, so the POST branch was
    unreachable; the telephone form key was misspelled 'telefone_vendededor'
    (KeyError -- confirm the template field name); and VendedorModel.__init__
    requires an `ativado` argument that the call was missing.
    """
    if request.method == 'POST':
        nome_vendedor = request.form['nome_vendedor']
        telefone_vendedor = request.form['telefone_vendedor']
        cnpj_vendedor = request.form['cnpj_vendedor']
        email_vendedor = request.form['email_vendedor']
        senha_vendedor = request.form['senha_vendedor']
        endereco_vendedor = request.form['endereco_vendedor']
        numero_end_vendedor = request.form['numero_end_vendedor']
        complemento_vendedor = request.form['complemento_vendedor']
        bairro_vendedor = request.form['bairro_vendedor']
        cep_vendedor = request.form['cep_vendedor']
        captcha_response = request.form['g-recaptcha-response']
        if is_human_vendedor(captcha_response):
            # New sellers start deactivated pending email confirmation.
            VendedorModel(nome_vendedor, telefone_vendedor, cnpj_vendedor, email_vendedor, senha_vendedor, endereco_vendedor, numero_end_vendedor, complemento_vendedor, bairro_vendedor, cep_vendedor, False)
            # NOTE(review): the model is built but never saved -- confirm intent.
            status = 'Enviado com sucesso'
        else:
            status = 'Im not a robot não pode ficar vazio!.'
        flash(status)
    return render_template('cadastro_vendedores.html', sitekey=secretkeys.sitekey_vendedor)
@app.route('/cadastro_usuario', methods=["GET", "POST"])
def usuario():
    """User registration form: validates the captcha and builds the model.

    BUG FIX: UsuarioModel.__init__ requires an `ativado` argument that the
    call was missing, so a successful captcha always raised TypeError. New
    accounts start deactivated, matching the email-confirmation flow.
    """
    if request.method == 'POST':
        nome_usuario = request.form['nome_usuario']
        telefone_usuario = request.form['telefone_usuario']
        cpf_usuario = request.form['cpf_usuario']
        email_usuario = request.form['email_usuario']
        senha_usuario = request.form['senha_usuario']
        endereco_usuario = request.form['endereco_usuario']
        numero_end_usuario = request.form['numero_end_usuario']
        complemento_usuario = request.form['complemento_usuario']
        bairro_usuario = request.form['bairro_usuario']
        cep_usuario = request.form['cep_usuario']
        captcha_response = request.form['g-recaptcha-response']
        if is_human_usuario(captcha_response):
            UsuarioModel(nome_usuario, telefone_usuario, cpf_usuario, email_usuario, senha_usuario, endereco_usuario, numero_end_usuario, complemento_usuario, bairro_usuario, cep_usuario, False)
            # NOTE(review): the model is built but never saved -- confirm intent.
            status = 'Enviado com sucesso'
        else:
            status = 'Im not a robot não pode ficar vazio!.'
        flash(status)
    return render_template('cadastro_usuario.html',sitekey=secretkeys.sitekey_usuario)
@app.route('/cadastro_entregador', methods=["GET", "POST"])
def entregador():
    """Render the courier sign-up form and handle its POST submission."""
    if request.method == 'POST':
        nome_entregador = request.form['nome_entregador']
        cpf_entregador = request.form['cpf_entregador']
        telefone_entregador = request.form['telefone_entregador']
        cnh_entregador = request.form['cnh_entregador']
        email_entregador = request.form['email_entregador']
        senha_entregador = request.form['senha_entregador']
        endereco_entregador = request.form['endereco_entregador']
        numero_end_entregador = request.form['numero_end_entregador']
        complemento_entregador = request.form['complemento_entregador']
        bairro_entregador = request.form['bairro_entregador']
        cep_entregador = request.form['cep_entregador']
        captcha_response = request.form['g-recaptcha-response']
        if is_human_entregador(captcha_response):
            # BUG FIX: EntregadorModel.__init__ also takes an `ativado`
            # argument (see models/entregador.py); the original call omitted
            # it and raised TypeError.  New accounts start deactivated.
            # NOTE(review): the instance is never saved here either —
            # confirm whether this form route is still meant to register.
            EntregadorModel(nome_entregador, cpf_entregador, telefone_entregador,
                            cnh_entregador, email_entregador, senha_entregador,
                            endereco_entregador, numero_end_entregador,
                            complemento_entregador, bairro_entregador,
                            cep_entregador, False)
            status = 'Enviado com sucesso'
        else:
            status = 'Im not a robot não pode ficar vazio!'
        flash(status)
        # NOTE(review): url_for targets endpoint 'cadastro_entregador' but
        # this view's endpoint is 'entregador' — verify this doesn't raise
        # BuildError at runtime.
        return redirect(url_for('cadastro_entregador'))
    return render_template('cadastro_entregador.html', sitekey=secretkeys.sitekey_entregador)
@app.route('/produtos')
def produtos():
    """List every registered product."""
    # Local renamed so it no longer shadows this view function's name.
    lista = listar_produtos()
    # if login_ok(request):  (login gate kept disabled, as before)
    return render_template('produtos.html', produtos=lista)
@app.route('/produtos/registro')
def registro():
    """Render the product-registration page.

    The vendor-login gate below is intentionally left disabled.
    """
    # logado = login_ok_vend()
    # if logado is None:
    #     return render_template('home.html')
    return render_template('registro_produtos.html')
def login_ok(req):
    """Return True when `req` carries valid user credentials.

    Reads the plaintext 'login'/'senha' cookies set by UsuarioLogin.
    NOTE(review): storing and comparing plaintext passwords in cookies is
    insecure — passwords should be hashed and a session token used.
    """
    login = req.cookies.get("login")
    senha = req.cookies.get("senha")
    if login is None or senha is None:
        # Missing cookie means not logged in; safe_str_cmp would raise
        # TypeError if handed None.
        return False
    user = UsuarioModel.achar_por_login(login)
    return user is not None and safe_str_cmp(user.senha_usuario, senha)
def login_ok_vend(request):
    """Return True when the request carries valid vendor credentials.

    Same contract as login_ok(), but checked against VendedorModel.
    (The unused 'ativado' cookie read was removed; the parameter name is
    kept for compatibility even though it shadows flask.request.)
    """
    login = request.cookies.get("login")
    senha = request.cookies.get("senha")
    if login is None or senha is None:
        # Guard: safe_str_cmp raises TypeError on None.
        return False
    user = VendedorModel.achar_por_login(login)
    return user is not None and safe_str_cmp(user.senha_vendedor, senha)
# REST endpoint registrations (flask-restful resources from resources/).
api.add_resource(VendedorRegistro, '/vendedor_cadastro') # POST
api.add_resource(VendedorLogin, '/vendedor_login') # POST
# NOTE(review): '/logout' is registered three times (vendor, user,
# courier); requests can only reach one of them — confirm which logout is
# intended and consider giving each resource a distinct URL.
api.add_resource(VendedorLogout, '/logout') # POST
api.add_resource(Vendedores, '/vendedores/<int:id_vendedor>') # GET
api.add_resource(Usuarios, '/usuarios/<int:id_usuario>')
api.add_resource(UsuarioRegistro, '/usuario_cadastro') # POST
api.add_resource(UsuarioLogin, '/usuario_login') # POST
api.add_resource(UsuarioLogout, '/logout') # POST
api.add_resource(Entregadores, '/entregador/<int:id_entregador>')
api.add_resource(EntregadorRegistro, '/entregador_cadastro') # POST
api.add_resource(EntregadorLogin, '/entregador_login') # POST
api.add_resource(EntregadorLogout, '/logout') # POST
# Account-confirmation endpoints hit from the e-mailed links.
api.add_resource(UsuarioConfirmado, '/confirmacao/<int:id_usuario>') # GET
api.add_resource(VendedorConfirmado, '/confirmacao_vendedor/<int:id_vendedor>') # GET
api.add_resource(EntregadorConfirmado, '/confirmacao_entregador/<int:id_entregador>') # GET
api.add_resource(Produto, '/produto/<int:id_produto>') # POST
api.add_resource(ProdutosRegistro, '/produto/registro') # POST
api.add_resource(ProdutosBuscaSimples, '/produto/busca') # GET
# api.add_resource(ProdutosBuscaFiltro, '/produto/busca/<string:filtro_produto>') # GET
if __name__ == '__main__':
    # Bind the SQLAlchemy instance to the app only when run as a script.
    from sql_alchemy import banco
    banco.init_app(app)
    app.run(debug=True)
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,292
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/models/produtos.py
|
from sql_alchemy import banco
import random
class ProdutosModel(banco.Model):
    """SQLAlchemy model for the 'produto' table plus query/persistence helpers."""
    __tablename__ = 'produto'

    id_produto = banco.Column(banco.Integer, primary_key=True)
    nome_produto = banco.Column(banco.String(50), nullable=False)
    codigo_produto = banco.Column(banco.String(8), unique=True, nullable=False)
    descricao_produto = banco.Column(banco.String(100))
    preco_produto = banco.Column(banco.Float(precision=2))
    tipo_produto = banco.Column(banco.String(15))
    filtro_produto = banco.Column(banco.String(10))
    quantidade_produto = banco.Column(banco.String(10))

    def __init__(self, nome_produto, codigo_produto, descricao_produto, preco_produto, tipo_produto, filtro_produto, quantidade_produto):
        self.nome_produto = nome_produto
        self.codigo_produto = codigo_produto
        self.descricao_produto = descricao_produto
        self.preco_produto = preco_produto
        self.tipo_produto = tipo_produto
        self.filtro_produto = filtro_produto
        self.quantidade_produto = quantidade_produto

    def json(self):
        """Serializable dict of every column except the primary key."""
        return {
            'nome_produto': self.nome_produto,
            'codigo_produto': self.codigo_produto,
            'descricao_produto': self.descricao_produto,
            'preco_produto': self.preco_produto,
            'tipo_produto': self.tipo_produto,
            'filtro_produto': self.filtro_produto,
            'quantidade_produto': self.quantidade_produto
        }

    @classmethod
    def listar_por_id(cls, id_produto):
        """Return the product with this primary key, or None."""
        # .first() already returns None when nothing matches.
        return cls.query.filter_by(id_produto=id_produto).first()

    @classmethod
    def achar_por_codigo(cls, codigo_produto):
        """Return the product with this unique code, or None."""
        return cls.query.filter_by(codigo_produto=codigo_produto).first()

    @classmethod
    def listar_produtos_busca(cls, nome_produto):
        """Search-bar lookup: products whose name contains `nome_produto`
        anywhere, case-insensitively (e.g. 'shampoo' matches
        'shampoo cachorro 200 ml').  Implements the TODO the original
        left as a comment."""
        return cls.query.filter(cls.nome_produto.ilike('%{}%'.format(nome_produto))).all()

    @classmethod
    def listar_produtos_especifico(cls, filtro_produto):
        """Return the query of products in the given filter category.

        (The original's truthiness check was dead code: a Query object is
        always truthy, so the None branch could never run.)
        """
        return cls.query.filter_by(filtro_produto=filtro_produto)

    def salvar_produto(self):
        """Persist this instance (add + commit)."""
        banco.session.add(self)
        banco.session.commit()

    def atualizar_produto(self, nome_produto, codigo_produto, descricao_produto, preco_produto, tipo_produto, filtro_produto, quantidade_produto):
        """Overwrite every updatable field; the caller must call
        salvar_produto() afterwards to commit (matches the resource layer's
        atualizar-then-salvar pattern)."""
        self.nome_produto = nome_produto
        self.codigo_produto = codigo_produto
        self.descricao_produto = descricao_produto
        self.preco_produto = preco_produto
        self.tipo_produto = tipo_produto
        self.filtro_produto = filtro_produto
        self.quantidade_produto = quantidade_produto

    def deletar_produto(self):
        """Remove this instance (delete + commit)."""
        banco.session.delete(self)
        banco.session.commit()
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,293
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/models/entregador.py
|
from sql_alchemy import banco
from flask import request
from requests import post
from flask.helpers import url_for
import mailgun
# Mailgun configuration pulled from the local mailgun settings module:
# sending domain, API key, and the From identity used for the courier
# confirmation e-mails below.
domain = mailgun.MAILGUN_DOMAIN
api_key = mailgun.MAILGUN_API_KEY
title = mailgun.FROM_TITLE
email = mailgun.FROM_EMAIL
class EntregadorModel(banco.Model):
    """SQLAlchemy model for the 'entregador' (courier) table.

    Also sends the account-confirmation e-mail through Mailgun.
    """
    __tablename__ = 'entregador'

    id_entregador = banco.Column(banco.Integer, primary_key=True)
    nome_entregador = banco.Column(banco.String(200))
    cpf_entregador = banco.Column(banco.String(12))
    telefone_entregador = banco.Column(banco.String(11))
    cnh_entregador = banco.Column(banco.String(100))
    email_entregador = banco.Column(banco.String(50))
    senha_entregador = banco.Column(banco.String(30))
    endereco_entregador = banco.Column(banco.String(40))
    numero_end_entregador = banco.Column(banco.String(10))
    complemento_entregador = banco.Column(banco.String(30))
    bairro_entregador = banco.Column(banco.String(20))
    cep_entregador = banco.Column(banco.String(8))
    ativado = banco.Column(banco.Boolean, default=False)

    def __init__(self, nome_entregador, cpf_entregador, telefone_entregador, cnh_entregador, email_entregador, senha_entregador, endereco_entregador, numero_end_entregador, complemento_entregador, bairro_entregador, cep_entregador, ativado=False):
        # `ativado` now defaults to False (matching the column default):
        # callers such as the /cadastro_entregador form omitted it and
        # previously crashed with TypeError.  Backward-compatible.
        self.nome_entregador = nome_entregador
        self.cpf_entregador = cpf_entregador
        self.telefone_entregador = telefone_entregador
        self.cnh_entregador = cnh_entregador
        self.email_entregador = email_entregador
        self.senha_entregador = senha_entregador
        self.endereco_entregador = endereco_entregador
        self.numero_end_entregador = numero_end_entregador
        self.complemento_entregador = complemento_entregador
        self.bairro_entregador = bairro_entregador
        self.cep_entregador = cep_entregador
        self.ativado = ativado

    def enviar_confirmacao_email_entregador(self):
        """POST the confirmation e-mail via Mailgun; returns the
        requests.Response."""
        # http://127.0.0.1:5000/confirmacao_entregador/
        link = request.url_root[:-1] + url_for('entregadorconfirmado', id_entregador=self.id_entregador)
        return post('https://api.mailgun.net/v3/{}/messages'.format(domain),
                    auth=('api', api_key),
                    data={'from': '{} <{}>'.format(title, email),
                          'to': self.email_entregador,
                          'subject': 'Confirmação de Cadastro',
                          'text': 'Confirme seu cadastro clicando no link a seguir: {}'.format(link),
                          'html': '<html><p>\
                            Confirme seu cadastro clicando no link a seguir:<a href="{}">CONFIRMAR EMAIL</a>\
                            </p><html>'.format(link)
                          })

    def json(self):
        """Serializable dict of the courier (password intentionally omitted)."""
        return {
            'nome_entregador': self.nome_entregador,
            'cpf_entregador': self.cpf_entregador,
            'telefone_entregador': self.telefone_entregador,
            'cnh_entregador': self.cnh_entregador,
            'email_entregador': self.email_entregador,
            'endereco_entregador': self.endereco_entregador,
            'numero_end_entregador': self.numero_end_entregador,
            'complemento_entregador': self.complemento_entregador,
            # BUG FIX: was self.numero_end_entregador (copy-paste error).
            'bairro_entregador': self.bairro_entregador,
            'cep_entregador': self.cep_entregador,
            'ativado': self.ativado
        }

    @classmethod
    def achar_entregador(cls, id_entregador):
        """Return the courier with this primary key, or None."""
        # .first() already yields None when nothing matches.
        return cls.query.filter_by(id_entregador=id_entregador).first()

    @classmethod
    def achar_por_login(cls, email_entregador):
        """Return the courier with this login e-mail, or None."""
        return cls.query.filter_by(email_entregador=email_entregador).first()

    def salvar_entregador(self):
        """Persist this instance (add + commit)."""
        banco.session.add(self)
        banco.session.commit()

    def atualizar_entregador(self, nome_entregador, cpf_entregador, telefone_entregador, cnh_entregador, email_entregador, senha_entregador, endereco_entregador, numero_end_entregador, complemento_entregador, bairro_entregador, cep_entregador, ativado):
        """Overwrite the updatable fields; caller commits via salvar_entregador().

        BUG FIX: the original wrapped these assignments in two tautological
        guards (`email_entregador == email_entregador`) and used `==`
        instead of `=` for `ativado`, so the flag was never updated.
        E-mail and password are intentionally still not updatable here.
        """
        self.nome_entregador = nome_entregador
        self.cpf_entregador = cpf_entregador
        self.telefone_entregador = telefone_entregador
        self.cnh_entregador = cnh_entregador
        self.endereco_entregador = endereco_entregador
        self.numero_end_entregador = numero_end_entregador
        self.complemento_entregador = complemento_entregador
        self.bairro_entregador = bairro_entregador
        self.cep_entregador = cep_entregador
        self.ativado = ativado

    def deletar_entregador(self):
        """Remove this instance (delete + commit)."""
        banco.session.delete(self)
        banco.session.commit()
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,294
|
andreyhitoshi1997/AC-Desenvolvimento-Sistemas
|
refs/heads/master
|
/resources/usuario.py
|
from flask_restful import Resource, reqparse
from flask import render_template
from models.usuario import UsuarioModel
from flask.helpers import make_response
from werkzeug.security import safe_str_cmp
import traceback
# Request parser for user create/update payloads: one argument per
# UsuarioModel column except the primary key.  required=True makes
# flask-restful reject the request with the given help message.
atributos = reqparse.RequestParser()
atributos.add_argument('nome_usuario', type=str, required=True, help="Ei! o seu 'nome' é obrigatório!")
atributos.add_argument('telefone_usuario', type=str, required=True, help="Ei! o seu 'telefone' é obrigatório!")
atributos.add_argument('cpf_usuario', type=str, required=True, help="Ei! o seu 'cpf' é obrigatório!")
atributos.add_argument('email_usuario', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos.add_argument('senha_usuario', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
atributos.add_argument('endereco_usuario', type=str, required=True, help="Ei! o seu 'endereço' é obrigatório!")
atributos.add_argument('numero_end_usuario', type=str, required=True, help="Ei! o seu 'número de endereço' é obrigatório!")
atributos.add_argument('complemento_usuario', type=str, required=False)
atributos.add_argument('bairro_usuario', type=str, required=True, help="Ei! o seu 'bairro' é obrigatório!")
atributos.add_argument('cep_usuario', type=str, required=True, help="Ei! o seu 'cep' é obrigatório!")
atributos.add_argument('ativado', type=bool)
# Slimmer parser for the login endpoint: credentials only.
atributos_login = reqparse.RequestParser()
atributos_login.add_argument('email_usuario', type=str, required=True, help="Ei! o seu 'e-mail' é obrigatório!")
atributos_login.add_argument('senha_usuario', type=str, required=True, help="Ei! a sua 'senha' é obrigatória!")
atributos_login.add_argument('ativado', type=bool)
class Usuarios(Resource):
    """REST resource for fetching, upserting and deleting a single user."""

    def get(self, id_usuario):
        """Return the user's JSON, or a themed 404 page."""
        usuario = UsuarioModel.achar_usuario(id_usuario)
        if usuario:
            return usuario.json()
        return make_response(render_template("home.html", message="Usuário não encontrado."), 404)

    def put(self, id_usuario):
        """Update the user when it exists, otherwise create it (upsert)."""
        dados = atributos.parse_args()
        usuario = UsuarioModel.achar_usuario(id_usuario)
        if usuario:
            usuario.atualizar_usuario(**dados)
            usuario.salvar_usuario()
            return {"message": "Usuário atualizado com sucesso!"}, 200
        usuario = UsuarioModel(**dados)
        usuario.salvar_usuario()
        # BUG FIX: the message said "Vendedor criado com sucesso!" —
        # copy-pasted from the vendor resource although a *user* was created.
        return make_response(render_template("home.html", message="Usuário criado com sucesso!"), 201)

    def delete(self, id_usuario):
        """Delete the user, returning a themed 200/404 page."""
        user = UsuarioModel.achar_usuario(id_usuario)
        if user:
            user.deletar_usuario()
            return make_response(render_template("home.html", message='Usuário deletado com sucesso!'), 200)
        return make_response(render_template("home.html", message='Usuário não encontrado!'), 404)
class UsuarioRegistro(Resource):
    """Registers a new user and sends the e-mail confirmation link."""

    def post(self):
        dados = atributos.parse_args()
        if UsuarioModel.achar_por_login(dados['email_usuario']):
            return {"message": "O seu login '{}' já existe".format(dados['email_usuario'])}
        user = UsuarioModel(**dados)
        # Accounts start deactivated until the e-mailed link is visited.
        user.ativado = False
        try:
            user.salvar_usuario()
            user.enviar_confirmacao_email()
        except Exception:
            # BUG FIX: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt.  Roll back the half-created account and
            # surface a 500 page.
            user.deletar_usuario()
            traceback.print_exc()
            return make_response(render_template("cadastro_usuario.html", message='Erro interno de servidor'), 500)
        return make_response(render_template("login.html", message="Sucesso! Cadastro pendente de confirmação via email"), 201)
class UsuarioLogin(Resource):
    """Authenticates a user and stores the credentials in cookies."""

    @classmethod
    def post(cls):
        dados = atributos_login.parse_args()
        user = UsuarioModel.achar_por_login(dados['email_usuario'])
        # Single message for unknown e-mail or wrong password, so callers
        # cannot tell which part failed.
        if user is None or not safe_str_cmp(user.senha_usuario, dados['senha_usuario']):
            return make_response(render_template("login.html", message='Usuário ou senha incorretos.'), 401)
        if not user.ativado:
            return make_response(render_template("login.html", message='Usuário não confirmado'), 400)
        # NOTE(review): plaintext credentials in cookies are insecure —
        # consider a signed session token instead.
        resposta = make_response(render_template("home.html", message='Login realizado com sucesso!'), 200)
        resposta.set_cookie("login", dados['email_usuario'], samesite="Strict")
        resposta.set_cookie("senha", dados['senha_usuario'], samesite="Strict")
        return resposta
class UsuarioLogout(Resource):
    """Logs the user out by clearing the credential cookies."""

    def post(self):
        r = make_response(render_template("cadastro_usuario.html", message="Deslogou com sucesso!"))
        # BUG FIX: UsuarioLogin stores the e-mail under the cookie name
        # "login" (which login_ok also reads); the original cleared a
        # non-existent "email_usuario" cookie, leaving the user logged in.
        r.set_cookie("login", "")
        r.set_cookie("senha", "")
        # jwt_id = get_raw_jwt()['jti'] # JWT Token Identifier
        # BLACKLIST.add(jwt_id)
        return r
class UsuarioConfirmado(Resource):
    """Activation endpoint hit from the confirmation link e-mailed on signup."""

    @classmethod
    def get(cls, id_usuario):
        user = UsuarioModel.achar_usuario(id_usuario)
        if not user:
            return {'message': 'Usuário não encontrado'}, 404
        user.ativado = True
        user.salvar_usuario()
        headers = {'Content-Type': 'text/html'}
        # BUG FIX: the template previously received the literal string
        # 'email_usuario' instead of the user's actual e-mail address.
        return make_response(render_template('usuario_confirmado.html', email=user.email_usuario), 200, headers)
|
{"/resources/vendedor.py": ["/models/vendedor.py"], "/resources/produtos.py": ["/models/produtos.py"], "/resources/entregador.py": ["/models/entregador.py"], "/app.py": ["/models/entregador.py", "/models/vendedor.py", "/models/usuario.py", "/resources/vendedor.py", "/resources/usuario.py", "/resources/entregador.py", "/resources/produtos.py"], "/resources/usuario.py": ["/models/usuario.py"]}
|
15,295
|
jeanson-JinSheng/Pytorch-ENet-Nice
|
refs/heads/master
|
/train.py
|
import torch
import torch.nn as nn
from utils import *
from models.ENet import ENet
from models.ENet_encoder import ENet_encoder
import sys
from tqdm import tqdm
def train(FLAGS):
    """Train ENet on CamVid (default) or Cityscapes.

    FLAGS is the argparse namespace assembled in init.py.  The routine
    computes class weights, builds the model (full encoder-decoder or
    encoder only), then runs the train/eval loop and writes checkpoints
    to ./logs/ every `save_every` epochs.
    """
    # Defining the hyperparameters
    device = FLAGS.cuda
    batch_size = FLAGS.batch_size
    epochs = FLAGS.epochs
    lr = FLAGS.learning_rate
    print_every = FLAGS.print_every
    eval_every = FLAGS.eval_every
    save_every = FLAGS.save_every
    nc = FLAGS.num_classes
    wd = FLAGS.weight_decay
    ip = FLAGS.input_path_train
    lp = FLAGS.label_path_train
    ipv = FLAGS.input_path_val
    lpv = FLAGS.label_path_val
    train_mode = FLAGS.train_mode
    pretrain_model = FLAGS.pretrain_model
    cityscapes_path = FLAGS.cityscapes_path
    resume_model_path = FLAGS.resume_model_path
    print ('[INFO]Defined all the hyperparameters successfully!')
    # Get the class weights.  A non-empty cityscapes_path switches the
    # whole pipeline to the Cityscapes dataset.
    print ('[INFO]Starting to define the class weights...')
    if len(cityscapes_path):
        pipe = loader_cityscapes(ip, cityscapes_path, batch_size='all')
        class_weights = get_class_weights(pipe, nc, isCityscapes=True)
        #class_weights = np.array([3.03507951, 13.09507946, 4.54913664, 37.64795738, 35.78537802, 31.50943831, 45.88744201, 39.936759,
        #                          6.05101481, 31.85754823, 16.92219283, 32.07766734, 47.35907214, 11.34163794, 44.31105748, 45.81085476,
        #                          45.67260936, 48.3493813, 42.02189188])
    else:
        pipe = loader(ip, lp, batch_size='all')
        class_weights = get_class_weights(pipe, nc)
    print ('[INFO]Fetched all class weights successfully!')
    # Get an instance of the model
    if train_mode.lower() == 'encoder-decoder':
        enet = ENet(nc)
        if len(pretrain_model):
            # Warm-start: keep only the pretrained weights whose names
            # still exist in the current architecture.
            checkpoint0 = torch.load(pretrain_model)
            pretrain_dict = checkpoint0['state_dict']
            enet_dict = enet.state_dict()
            pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in enet_dict}
            enet_dict.update(pretrain_dict)
            enet.load_state_dict(enet_dict)
            print('[INFO]Previous model Instantiated!')
    else:
        enet = ENet_encoder(nc)
    print ('[INFO]Model Instantiated!')
    # Move the model to cuda if available
    enet = enet.to(device)
    # Define the criterion and the optimizer
    if len(cityscapes_path):
        # 255 marks "void" pixels in Cityscapes ground truth.
        criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weights).to(device), ignore_index=255)
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weights).to(device))
    optimizer = torch.optim.Adam(enet.parameters(), lr=lr, weight_decay=wd)
    # Halve the LR when the epoch loss plateaus for 2 epochs.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.5, patience=2, verbose=True, threshold=0.01)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.5, patience=2, verbose=True,
    #                                                        threshold=0.005)
    print ('[INFO]Defined the loss function and the optimizer')
    # Training Loop starts
    print ('[INFO]Staring Training...')
    print ()
    train_losses = []
    eval_losses = []
    # Batch counts are hard-coded to the dataset sizes
    # (Cityscapes: 2975 train / 500 val; CamVid: 367 train / 101 val).
    if len(cityscapes_path):
        # Assuming we are using the Cityscapes Dataset
        bc_train = 2975 // batch_size
        bc_eval = 500 // batch_size
        pipe = loader_cityscapes(ip, cityscapes_path, batch_size)
        eval_pipe = loader_cityscapes(ipv, cityscapes_path, batch_size)
    else:
        # Assuming we are using the CamVid Dataset
        bc_train = 367 // batch_size
        bc_eval = 101 // batch_size
        pipe = loader(ip, lp, batch_size)
        eval_pipe = loader(ipv, lpv, batch_size)
    epoch = 1
    if len(resume_model_path):
        # Resume: restore weights and continue from the saved epoch + 1.
        checkpoint1 = torch.load(resume_model_path)
        epoch = checkpoint1['epochs'] + 1
        enet.load_state_dict(checkpoint1['state_dict'])
    # NOTE(review): no-op assignment left over from an edit.
    epochs = epochs
    for e in range(epoch, epochs+1):
        train_loss = 0
        print ('-'*15,'Epoch %d' % e, '-'*15)
        enet.train()
        for _ in tqdm(range(bc_train)):
            X_batch, mask_batch = next(pipe)
            #assert (X_batch >= 0. and X_batch <= 1.0).all()
            X_batch, mask_batch = X_batch.to(device), mask_batch.to(device)
            optimizer.zero_grad()
            out = enet(X_batch.float())
            loss = criterion(out, mask_batch.long())
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        print ()
        train_losses.append(train_loss)
        # NOTE(review): keys off (e+1) while eval/save below key off e, so
        # this printout fires one epoch earlier — confirm intended.
        if (e+1) % print_every == 0:
            print ('Epoch {}/{}...'.format(e, epochs),
                    'Loss {:6f}'.format(train_loss))
        scheduler.step(train_loss)
        if e % eval_every == 0:
            with torch.no_grad():
                enet.eval()
                eval_loss = 0
                for _ in tqdm(range(bc_eval)):
                    inputs, labels = next(eval_pipe)
                    inputs, labels = inputs.to(device), labels.to(device)
                    # NOTE(review): unlike the training pass, inputs are not
                    # cast with .float() here — confirm the loader's dtype.
                    out = enet(inputs)
                    loss = criterion(out, labels.long())
                    eval_loss += loss.item()
                print ()
                print ('Loss {:6f}'.format(eval_loss))
                eval_losses.append(eval_loss)
        if e % save_every == 0:
            # Checkpoint filename embeds epoch, current LR and train loss.
            checkpoint = {
                'epochs' : e,
                'state_dict' : enet.state_dict()
            }
            if train_mode.lower() == 'encoder-decoder':
                torch.save(checkpoint,
                           './logs/ckpt-enet-{}-{}-{}.pth'.format(e, optimizer.state_dict()['param_groups'][0]['lr'],
                                                                  train_loss))
            else:
                torch.save(checkpoint,
                           './logs/ckpt-enet_encoder-{}-{}-{}.pth'.format(e, optimizer.state_dict()['param_groups'][0]['lr'],
                                                                          train_loss))
            print ('Model saved!')
        # NOTE(review): divides by `epochs` even when resuming mid-run, so
        # "Total Mean Loss" is only meaningful for a fresh run.
        print ('Epoch {}/{}...'.format(e+1, epochs),
                'Total Mean Loss: {:6f}'.format(sum(train_losses) / epochs))
    print ('[INFO]Training Process complete!')
|
{"/train.py": ["/utils.py"], "/init.py": ["/train.py"]}
|
15,296
|
jeanson-JinSheng/Pytorch-ENet-Nice
|
refs/heads/master
|
/datasets/Cityscapes/makepairs.py
|
import os
import sys
import cv2
import numpy as np
def makepairs():
    """Write train/val pair lists for Cityscapes.

    For every image under leftImg8bit/{train,val}/<city>/, emits one line
    "image_path label_path" pairing it with its gtFine_labelTrainIds map.
    Output files: train_cityscapes.txt and val_cityscapes.txt.
    """
    for split, out_name in (("train", "train_cityscapes.txt"),
                            ("val", "val_cityscapes.txt")):
        # BUG FIX: the original opened both output files and never closed
        # them; the with-block guarantees flush + close.
        with open(out_name, 'w') as out:
            for city in os.listdir("leftImg8bit/" + split + "/"):
                for image in os.listdir("leftImg8bit/" + split + "/" + "/" + city):
                    # e.g. "aachen_000000_000019_leftImg8bit.png" ->
                    # prefix "aachen_000000_000019_"
                    prefix = image.split('leftImg8bit.png')[0]
                    out.write("/Cityscapes/leftImg8bit/" + split + "/" + city + "/" + image
                              + " /Cityscapes/gtFine/" + split + "/" + city + "/"
                              + prefix + "gtFine_labelTrainIds.png\n")
if __name__ == '__main__':
    # Generate the train/val pair lists when run as a script.
    makepairs()
|
{"/train.py": ["/utils.py"], "/init.py": ["/train.py"]}
|
15,297
|
jeanson-JinSheng/Pytorch-ENet-Nice
|
refs/heads/master
|
/utils.py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import sys
import os
from PIL import Image
import torch
def create_class_mask(img, color_map, is_normalized_img=True, is_normalized_map=False, show_masks=False):
    """
    Build one binary mask per class from a color-segmented image.

    img = segmented image, shape (H, W, 3)
    color_map = list of RGB tuples, one per class
    is_normalized_img = True when img values are in [0, 1]; the image is
                        then rescaled to the 0-255 range of the color map
    is_normalized_map = True when the color map is in [0, 1]; the image is
                        then scaled down instead
    show_masks = accepted for API compatibility but currently unused
                 (the original never implemented it either)

    Returns a uint8 array of shape (num_classes, H, W) with 1 where the
    class color matches on all three channels.
    """
    if is_normalized_img and (not is_normalized_map):
        # BUG FIX: `img *= 255` mutated the caller's array in place;
        # rebinding keeps the scaling local to this function.
        img = img * 255
    if is_normalized_map and (not is_normalized_img):
        img = img / 255
    mask = []
    hw_tuple = img.shape[:-1]
    for color in color_map:
        # Broadcast the class color over the full image...
        color_img = []
        for idx in range(3):
            color_img.append(np.ones(hw_tuple) * color[idx])
        color_img = np.array(color_img, dtype=np.uint8).transpose(1, 2, 0)
        # ...and mark pixels where all three channels match.
        mask.append(np.uint8((color_img == img).sum(axis = -1) == 3))
    return np.array(mask)
def loader(training_path, segmented_path, batch_size, h=512, w=512):
    """
    Infinite generator of (inputs, labels) batches for CamVid-style data.

    Arguments:
        training_path - str - directory containing the training images
        segmented_path - str - directory containing the segmented label maps
        batch_size - int or 'all' - 'all' yields the entire dataset at once
        h, w - accepted for API compatibility; resizing is currently
               disabled (see the commented cv2.resize calls)

    Yields:
        inputs - tensor in NCHW layout
        labels - tensor of label maps
    """
    # BUG FIX: os.listdir returns entries in arbitrary order; sorting both
    # listings guarantees index jj pairs each image with its own label map
    # instead of relying on the two directories happening to list alike.
    filenames_t = sorted(os.listdir(training_path))
    total_files_t = len(filenames_t)
    filenames_s = sorted(os.listdir(segmented_path))
    total_files_s = len(filenames_s)
    assert(total_files_t == total_files_s)
    if str(batch_size).lower() == 'all':
        batch_size = total_files_s
    while(1):
        # Sample a random batch (with replacement) every iteration.
        batch_idxs = np.random.randint(0, total_files_s, batch_size)
        inputs = []
        labels = []
        for jj in batch_idxs:
            img = plt.imread(training_path + filenames_t[jj])
            #img = cv2.resize(img, (h, w), cv2.INTER_NEAREST)
            inputs.append(img)
            img = Image.open(segmented_path + filenames_s[jj])
            img = np.array(img)
            #img = cv2.resize(img, (h, w), cv2.INTER_NEAREST)
            labels.append(img)
        # Stack to (H, W, N, C) then transpose twice to reach NCHW.
        inputs = np.stack(inputs, axis=2)
        inputs = torch.tensor(inputs).transpose(0, 2).transpose(1, 3)
        labels = torch.tensor(labels)
        yield inputs, labels
def loader_cityscapes(txt_path, cityscapes_path, batch_size):
    """
    Infinite generator over the Cityscapes pair list.

    Arguments:
        txt_path - str - txt file with "image_path label_path" per line
        cityscapes_path - str - root directory prefixed to every path
        batch_size - int or 'all'

    When batch_size == 'all', yields label batches only (this is the mode
    get_class_weights consumes); otherwise yields (inputs, labels).
    """
    lines = open(txt_path, 'r').readlines()
    total_files = len(lines)
    images = []
    gts = []
    for line in lines:
        line = line.strip().split(" ")
        images.append(line[0])
        gts.append(line[1])
    if str(batch_size).lower() == 'all':
        # BUG FIX: the original ran this labels-only infinite loop for
        # EVERY batch size, which made the (inputs, labels) loop below
        # unreachable; it is only meant for the class-weight pass.
        batch_size = total_files
        while (1):
            batch_idxs = np.random.randint(0, total_files, batch_size)
            labels = []
            for jj in batch_idxs:
                img = Image.open(cityscapes_path + gts[jj])
                img = np.array(img)
                #img5 = scale_downsample(img, 0.5, 0.5)
                labels.append(img)
            labels = torch.tensor(labels)
            yield labels
    while (1):
        batch_idxs = np.random.randint(0, total_files, batch_size)
        inputs = []
        labels = []
        for jj in batch_idxs:
            img = plt.imread(cityscapes_path + images[jj])
            #img5 = scale_downsample(img, 0.5, 0.5)
            inputs.append(img)
            img = Image.open(cityscapes_path + gts[jj])
            img = np.array(img)
            #img5 = scale_downsample(img, 0.5, 0.5)
            labels.append(img)
        # Stack to (H, W, N, C) then transpose twice to reach NCHW.
        inputs = np.stack(inputs, axis=2)
        inputs = torch.tensor(inputs).transpose(0, 2).transpose(1, 3)
        labels = torch.tensor(labels)
        yield inputs, labels
def decode_segmap(image, cityscapes):
    """
    Convert a 2-D label map into a color image.

    image      - 2-D array of integer class labels
    cityscapes - use the 19-class Cityscapes palette when True, else the
                 12-class CamVid palette
    Returns an (H, W, 3) uint8 image.  Channel 0 receives palette
    component 2 and channel 2 receives component 0, reproducing the
    original implementation's channel order exactly.
    """
    if cityscapes:
        palette = np.array([
            [128, 64, 128],   # road
            [244, 35, 232],   # sidewalk
            [70, 70, 70],     # building
            [102, 102, 156],  # wall
            [190, 153, 153],  # fence
            [153, 153, 153],  # pole
            [250, 170, 30],   # traffic light
            [220, 220, 0],    # traffic sign
            [107, 142, 35],   # vegetation
            [152, 251, 152],  # terrain
            [70, 130, 180],   # sky
            [220, 20, 60],    # person
            [255, 0, 0],      # rider
            [0, 0, 142],      # car
            [0, 0, 70],       # truck
            [0, 60, 100],     # bus
            [0, 80, 100],     # train
            [0, 0, 230],      # motorcycle
            [119, 11, 32],    # bicycle
        ]).astype(np.uint8)
    else:
        palette = np.array([
            [128, 128, 128],  # sky
            [128, 0, 0],      # building
            [192, 192, 128],  # column / pole
            [255, 69, 0],     # road marking
            [128, 64, 128],   # road
            [60, 40, 222],    # pavement
            [128, 128, 0],    # tree
            [192, 128, 128],  # sign symbol
            [64, 64, 128],    # fence
            [64, 0, 128],     # car
            [64, 64, 0],      # pedestrian
            [0, 128, 192],    # bicyclist
        ]).astype(np.uint8)
    rgb = np.zeros((image.shape[0], image.shape[1], 3)).astype(np.uint8)
    for label, color in enumerate(palette):
        selected = image == label
        # Mirror the original r/g/b assignment: out[0] <- color[2],
        # out[1] <- color[1], out[2] <- color[0].
        rgb[selected, 0] = color[2]
        rgb[selected, 1] = color[1]
        rgb[selected, 2] = color[0]
    return rgb
def show_images(images, in_row=True):
    '''
    Display a list of (title, image) pairs in a single matplotlib figure.

    images - sequence of (title, image_array) tuples
    in_row - lay subplots out horizontally when True, vertically otherwise
    '''
    count = len(images)
    layout = (1, count) if in_row else (count, 1)
    for pos, (title, picture) in enumerate(images, start=1):
        plt.subplot(*layout, pos)
        plt.title(title)
        plt.axis('off')
        plt.imshow(picture)
    plt.show()
def get_class_weights(loader, num_classes, c=1.02, isCityscapes=False):
    '''
    Return per-class weights computed from label frequency
    (ENet weighting scheme: w = 1 / ln(c + propensity)).

    Arguments:
    - loader : generator expected to yield ALL labels in one iteration
               (labels only when isCityscapes, (inputs, labels) otherwise)
    - num_classes : the number of classes
    - c : smoothing constant of the weighting scheme
    - isCityscapes : keep only the 19 Cityscapes train ids and
                     renormalise over them (drops e.g. the 255 void label)
    Return:
    - class_weights : array with one weight per class
    '''
    if isCityscapes:
        labels = next(loader)
    else:
        _, labels = next(loader)
    all_labels = labels.flatten()
    all_len = len(all_labels)
    each_class = np.bincount(all_labels, minlength=num_classes)
    if isCityscapes:
        each_class = each_class[0:19]
        # Idiom fix: the original summed the counts with a manual loop.
        all_len = int(each_class.sum())
    prospensity_score = each_class / all_len
    class_weights = 1 / (np.log(c + prospensity_score))
    print("class_weights: ")
    print(class_weights)
    return class_weights
def scale_downsample(img, kx, ky):
    """Nearest-neighbour rescale of img by factor kx along rows (axis 0)
    and ky along columns (axis 1).

    Returns a new array of shape (round(H*kx), round(W*ky)[, C]).
    """
    rows = int(np.round(np.abs(img.shape[0] * kx)))
    cols = int(np.round(np.abs(img.shape[1] * ky)))
    if len(img.shape) == 3 and img.shape[2] >= 3:
        dist = np.zeros((rows, cols, img.shape[2]), img.dtype)
    else:
        dist = np.zeros((rows, cols), img.dtype)
    for y in range(rows):
        for x in range(cols):
            # BUG FIX: rows are scaled by kx and cols by ky above, so the
            # inverse mapping must divide y by kx and x by ky (the original
            # swapped them, which only worked when kx == ky).  Indices are
            # clamped because rounding can land one past the last source
            # pixel for odd sizes (IndexError in the original).
            src_y = min(int((y + 1) / kx + 0.5) - 1, img.shape[0] - 1)
            src_x = min(int((x + 1) / ky + 0.5) - 1, img.shape[1] - 1)
            dist[y, x] = img[src_y, src_x]
    return dist
|
{"/train.py": ["/utils.py"], "/init.py": ["/train.py"]}
|
15,298
|
jeanson-JinSheng/Pytorch-ENet-Nice
|
refs/heads/master
|
/init.py
|
import numpy as np
import argparse
from train import *
from test import *
if __name__ == '__main__':
    # CLI entry point for ENet: build the argument list, resolve the
    # compute device, then dispatch to train() or test().
    parser = argparse.ArgumentParser()
    # --- model / input geometry ---
    parser.add_argument('-m',
                        type=str,
                        default='./datasets/CamVid/ckpt-camvid-enet.pth',
                        help='The path to the pretrained enet model')
    parser.add_argument('-i', '--image-path',
                        type=str,
                        help='The path to the image to perform semantic segmentation')
    parser.add_argument('-rh', '--resize-height',
                        type=int,
                        default=1024,
                        help='The height for the resized image')
    parser.add_argument('-rw', '--resize-width',
                        type=int,
                        default=512,
                        help='The width for the resized image')
    # --- optimization hyper-parameters ---
    parser.add_argument('-lr', '--learning-rate',
                        type=float,
                        default=5e-3,
                        help='The learning rate')
    parser.add_argument('-bs', '--batch-size',
                        type=int,
                        default=10,
                        help='The batch size')
    parser.add_argument('-wd', '--weight-decay',
                        type=float,
                        default=2e-4,
                        help='The weight decay')
    parser.add_argument('-c', '--constant',
                        type=float,
                        default=1.02,
                        help='The constant used for calculating the class weights')
    parser.add_argument('-e', '--epochs',
                        type=int,
                        default=102,
                        help='The number of epochs')
    parser.add_argument('-nc', '--num-classes',
                        type=int,
                        default=12,
                        help='The number of classes')
    parser.add_argument('-se', '--save-every',
                        type=int,
                        default=10,
                        help='The number of epochs after which to save a model')
    # --- dataset locations (CamVid layout by default) ---
    parser.add_argument('-iptr', '--input-path-train',
                        type=str,
                        default='./datasets/CamVid/train/',
                        help='The path to the input dataset')
    parser.add_argument('-lptr', '--label-path-train',
                        type=str,
                        default='./datasets/CamVid/trainannot/',
                        help='The path to the label dataset')
    parser.add_argument('-ipv', '--input-path-val',
                        type=str,
                        default='./datasets/CamVid/val/',
                        help='The path to the input dataset')
    parser.add_argument('-lpv', '--label-path-val',
                        type=str,
                        default='./datasets/CamVid/valannot/',
                        help='The path to the label dataset')
    parser.add_argument('-iptt', '--input-path-test',
                        type=str,
                        default='./datasets/CamVid/test/',
                        help='The path to the input dataset')
    parser.add_argument('-lptt', '--label-path-test',
                        type=str,
                        default='./datasets/CamVid/testannot/',
                        help='The path to the label dataset')
    # --- logging cadence ---
    parser.add_argument('-pe', '--print-every',
                        type=int,
                        default=1,
                        help='The number of epochs after which to print the training loss')
    parser.add_argument('-ee', '--eval-every',
                        type=int,
                        default=10,
                        help='The number of epochs after which to print the validation loss')
    # NOTE(review): argparse `type=bool` is a known pitfall -- any
    # non-empty string (including "False") parses as True; only the
    # default False / bare flag behave as expected.  Confirm intended.
    parser.add_argument('--cuda',
                        type=bool,
                        default=False,
                        help='Whether to use cuda or not')
    parser.add_argument('--mode',
                        choices=['train', 'test'],
                        default='train',
                        help='Whether to train or test')
    parser.add_argument('--test_mode',
                        choices=['cityscapes', 'camvid'],
                        default='cityscapes',
                        help='Whether to test cityscape model or camvid model')
    parser.add_argument('--train_mode',
                        choices=['encoder-decoder', 'encoder'],
                        default='encoder-decoder',
                        help='Select to train mode of Enet')
    parser.add_argument('--pretrain_model',
                        type=str,
                        default='',
                        help='Import previous train model of encoder ENet')
    parser.add_argument('--cityscapes_path',
                        type=str,
                        default='',
                        help='Cityscapes Path to the directory of Cityscapes image')
    parser.add_argument('--resume_model_path',
                        type=str,
                        default='',
                        help='Model path to resume training')
    FLAGS, unparsed = parser.parse_known_args()
    # NOTE(review): `torch` is not imported in this file; it is presumably
    # re-exported by `from train import *` or `from test import *` -- confirm.
    # FLAGS.cuda is replaced in-place: bool flag in, torch.device out.
    FLAGS.cuda = torch.device('cuda:0' if torch.cuda.is_available() and FLAGS.cuda else 'cpu')
    if FLAGS.mode.lower() == 'train':
        train(FLAGS)
    elif FLAGS.mode.lower() == 'test':
        test(FLAGS)
    else:
        raise RuntimeError('Unknown mode passed. \n Mode passed should be either of "train" or "test"')
|
{"/train.py": ["/utils.py"], "/init.py": ["/train.py"]}
|
15,299
|
jaspringer/pymzlib
|
refs/heads/master
|
/filters.py
|
"""
Filter.py
Author: Thomas McGrew
License:
MIT license.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as numerical
def lpf2d( data, threshold ):
    """
    Performs a low pass filter on the passed in data.
    :Parameters:
        data : numerical.ndarray
            A 2 dimensional array (matrix) to be filtered
        threshold : int
            The position of the cutoff for the filter. Should be from 0 to 1
    rtype: numerical.ndarray
    returns: The filtered data
    """
    fftData = numerical.fft.fft2( data )
    width, height = fftData.shape
    # range() replaces the Python-2-only xrange(); identical behavior on
    # Python 2 and required on Python 3.
    for x in range( width ):
        for y in range( height ):
            # zero every frequency component outside the cutoff radius
            if not _insideCircle( x, y, width, height, threshold ):
                fftData[x][y] = 0
    return abs( numerical.fft.ifft2( fftData ))
def hpf2d( data, threshold ):
    """
    Performs a high pass filter on the passed in data.
    :Parameters:
        data : numerical.ndarray
            A 2 dimensional array (matrix) to be filtered
        threshold : int
            The position of the cutoff for the filter. Should be from 0 to 1
    rtype: numerical.ndarray
    returns: The filtered data
    """
    fftData = numerical.fft.fft2( data )
    width, height = fftData.shape
    # range() replaces the Python-2-only xrange(); identical behavior on
    # Python 2 and required on Python 3.
    for x in range( width ):
        for y in range( height ):
            # zero every frequency component inside the cutoff radius
            if _insideCircle( x, y, width, height, threshold ):
                fftData[x][y] = 0
    return abs( numerical.fft.ifft2( fftData ))
def lpf( data, threshold ):
    """
    Performs a low pass filter on the passed in data.
    :Parameters:
        data : numerical.ndarray
            A 1 dimensional array to be filtered
        threshold : int
            The position of the cutoff for the filter. Should be from 0 to 1
    rtype: numerical.ndarray
    returns: The filtered data
    """
    samples = numerical.array( data )
    count = samples.shape[ 0 ]
    cutoff = int(( count * threshold ) / 2 )
    if not cutoff:
        # cutoff of zero removes nothing; hand the array back unchanged
        return samples
    spectrum = numerical.fft.fft( samples )
    # wipe the high-frequency middle band of the spectrum
    spectrum[ cutoff:-cutoff ] = 0
    return numerical.fft.ifft( spectrum )
def hpf( data, threshold ):
    """
    Performs a high pass filter on the passed in data.
    :Parameters:
        data : numerical.ndarray
            A 1 dimensional array to be filtered
        threshold : int
            The position of the cutoff for the filter. Should be from 0 to 1
    rtype: numerical.ndarray
    returns: The filtered data
    """
    samples = numerical.array( data )
    count = samples.shape[ 0 ]
    cutoff = int(( count * threshold ) / 2 )
    if not cutoff:
        # cutoff of zero removes nothing; hand the array back unchanged
        return samples
    spectrum = numerical.fft.fft( samples )
    # wipe the low-frequency bands at both ends of the spectrum
    spectrum[ :cutoff ] = 0
    spectrum[ -cutoff: ] = 0
    return numerical.fft.ifft( spectrum )
def bpf( data, lowThreshold, highThreshold ):
    """
    Performs a band pass filter on the passed in data.
    :Parameters:
        data : numerical.ndarray
            A 1 dimensional array to be filtered
        lowThreshold : int
            The position of the cutoff for the high pass filter. Should be from 0 to 1
        highThreshold : int
            The position of the cutoff for the low pass filter. Should be from 0 to 1
    rtype: numerical.ndarray
    returns: The filtered data
    """
    samples = numerical.array( data )
    count = samples.shape[ 0 ]
    spectrum = numerical.fft.fft( samples )
    # low-pass half: wipe the high-frequency middle band
    highCut = int(( count * highThreshold ) / 2 )
    if highCut:
        spectrum[ highCut:-highCut ] = 0
    # high-pass half: wipe the low-frequency bands at both ends
    lowCut = int(( count * lowThreshold ) / 2 )
    if lowCut:
        spectrum[ :lowCut ] = 0
        spectrum[ -lowCut: ] = 0
    return numerical.fft.ifft( spectrum )
def _insideCircle( x, y, width, height, threshold ):
    """
    Determines whether a particular position in the matrix is above or below
    the threshold radius.  The min( v, size - v ) folding accounts for the
    mirrored layout of an unshifted FFT spectrum.
    rtype: bool
    returns: True if it is below the threshold, False otherwise
    """
    # Bug fix: this module never imported `math`, so math.sqrt raised a
    # NameError at runtime; numpy (imported as `numerical`) provides sqrt.
    fullDistance = numerical.sqrt( 2 * ( width / 2 )**2 )
    distance = numerical.sqrt( min( x, width - x )**2 + ( float( min( y, height - y )) * width / height ) ** 2 )
    return bool( threshold > distance / fullDistance )
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,300
|
jaspringer/pymzlib
|
refs/heads/master
|
/mzconvert.py
|
#!/usr/bin/env python
"""
Copyright: 2010 Thomas McGrew
License: X11 license.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
VERSION = "0.2"
import mzlib
from sys import argv
def main( options=None, args=None ):
    """
    Convert a mass-spec data file from one supported format to another.
    :Parameters:
        options : unused; kept for interface compatibility
        args : list of str
            args[0] is the input file name, args[1] the output file name.
    raises SystemExit(1) when the two file names are not supplied.
    """
    args = args or []
    if len( args ) < 2:
        # robustness fix: the old code only checked for zero arguments and
        # would IndexError on exactly one.
        print ( "This program requires input and output filename arguments" )
        # bug fix: the original called sys.exit(1), but only `argv` was
        # imported from sys, so the error path itself raised a NameError.
        raise SystemExit( 1 )
    inputFile = args[ 0 ]
    outputFile = args[ 1 ]
    rawData = mzlib.RawData( inputFile )
    rawData.write( outputFile )
if __name__ == "__main__":
    # CLI entry point: forward the command-line arguments (minus argv[0]).
    main( args = argv[1:] )
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,301
|
jaspringer/pymzlib
|
refs/heads/master
|
/mzlib.py
|
"""
Copyright: 2010 Thomas McGrew
License: MIT license.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
from xml.dom.minidom import parse
import struct
from base64 import b64decode, b64encode
import re
import zlib
import gzip
from copy import deepcopy
try:
import json
except ImportError:
try:
import simplejson as json
except:
json = False
VERSION = "0.2.1.2012.02.27"
class RawData( object ):
def __init__( self, _input=None ):
if type( _input ) == RawData:
# copy the passed in object
self.data = deepcopy( _input.data )
elif type( _input ) == str:
# read the passed in file name
self.read( _input )
else:
self.data = { 'scans' : [] }
def getScan( self, retentionTime ):
"""
Gets a scan from the data by retention time.
:Parameters:
retentionTime : float
A float indicating the retention time of the scan to retrieve. The scan
closest to that time is returned.
rtype: dict
return: A dict containing the scan points & metadata
"""
difference = 1048576
returnvalue = None
for scan in self.data[ 'scans' ]:
currentDifference = abs( scan[ 'retentionTime' ] - retentionTime )
if currentDifference < difference:
difference = currentDifference
returnvalue = scan
return returnvalue
def __getitem__( self, value ):
"""
Returns a list indicating the sic intensity for each scan in order. Only for
level 1 scans - other scans are omitted.
:Parameters:
value : slice
The m/z indices to retrieve intensity values between.
rtype: list
return: A list of intensity values.
"""
returnvalue = []
if type( value ) == slice:
if ( value.start ):
start = value.start
else:
start = 0
if ( value.stop ):
stop = value.stop
else:
stop = 1048576
return self.sic( start, stop, 1 )
else:
return self.sic( value - 0.1, value + 0.1, 1 )
def __iter__( self ):
return iter( self.data['scans'] )
def removeScans( self, minTime=0, maxTime=sys.maxint ):
"""
Discards all scans in the given time range.
:Parameters:
minTime : float
The minimum retention time for the scans to remove
maxTime : float
The maximum retention time for the scans to remove
"""
if minTime < maxTime:
self.data[ 'scans' ] = [ scan for scan in self.data['scans'] if
scan[ 'retentionTime' ] < minTime or
scan[ 'retentionTime' ] >= maxTime ]
def onlyScans( self, minTime=0, maxTime=sys.maxint ):
"""
Keeps only the scans specified in the time range, discarding all others.
:Parameters:
minTime : float
The minimum retention time for the scans to remove
maxTime : float
The maximum retention time for the scans to remove
"""
if minTime < maxTime:
self.data[ 'scans' ] = [ scan for scan in self.data['scans'] if
scan[ 'retentionTime' ] >= minTime and
scan[ 'retentionTime' ] < maxTime ]
    def removeMz( self, mz, tolerance=0.1 ):
        """
        Discards all data points with the specified m/z +/- the specified tolerance
        :Parameters:
            mz : float
                The m/z value of the mass to be removed from all scans.
            tolerance : float
                The tolerance to use for determining if the data point should be removed.
                Defaults to 0.1.
        """
        for scan in self.data[ 'scans' ]:
            try:
                # zip pairs each m/z with its intensity so both are dropped
                # together; zip(*...) splits the survivors back into the two
                # arrays (note: they come back as tuples, not lists).
                scan[ 'mzArray' ], scan[ 'intensityArray' ] = list( zip(
                *[ point for point in zip( scan[ 'mzArray' ], scan[ 'intensityArray' ])
                if point[ 0 ] < mz - tolerance or
                point[ 0 ] >= mz + tolerance ]))
            except ValueError:
                # zip(*[]) yields nothing to unpack when every point was
                # removed -- fall back to empty arrays.
                scan[ 'mzArray' ] = []
                scan[ 'intensityArray' ] = []
    def onlyMz( self, mz, tolerance=0.1 ):
        """
        Keeps only data points with the specified m/z +/- the specified tolerance,
        discarding all others.
        :Parameters:
            mz : float
                The m/z value of the mass to be retained from all scans.
            tolerance : float
                The tolerance to use for determining if the data point should be removed.
                Defaults to 0.1.
        """
        for scan in self.data[ 'scans' ]:
            try:
                # zip pairs each m/z with its intensity so both are kept
                # together; zip(*...) splits the survivors back into the two
                # arrays (note: they come back as tuples, not lists).
                scan[ 'mzArray' ], scan[ 'intensityArray' ] = list( zip(
                *[ point for point in zip( scan[ 'mzArray' ], scan[ 'intensityArray' ])
                if point[ 0 ] >= mz - tolerance and
                point[ 0 ] < mz + tolerance ]))
            except ValueError:
                # zip(*[]) yields nothing to unpack when every point was
                # filtered out -- fall back to empty arrays.
                scan[ 'mzArray' ] = []
                scan[ 'intensityArray' ] = []
def sic( self, start=0, stop=1048576, level=1 ):
"""
Returns a list indicating the selected intensity for each scan in order.
:Parameters:
start : float
The m/z indices to retrieve intensity values higher than or equal to.
stop : float
The m/z indecies to retrieve intensity values less than.
level : int
The msLevel of the scans to get intensity values for. A value of 0
uses all scans.
rtype: list
return: A list of intensity values.
"""
returnvalue = []
for scan in self.data[ 'scans' ]:
if not level or ( scan[ 'msLevel' ] == level ):
returnvalue.append( sum([ int_ for mz,int_ in
zip( scan[ 'mzArray' ], scan[ 'intensityArray' ])
if mz >= start and mz < stop ]))
return returnvalue
def tic( self, level=1 ):
"""
Returns a list indicating the total intensity for each scan in order.
:Parameters:
level : int
The msLevel of the scans to get intensity values for. A value of 0
uses all scans.
rtype: list
return: A list of intensity values.
"""
return [ sum( scan[ 'intensityArray' ]) for scan in self.data[ 'scans' ]
if ( not level or ( scan[ 'msLevel' ] == level ))]
def bpc( self, level=1 ):
"""
Returns a list indicating the base intensity for each scan in order.
:Parameters:
level : int
The msLevel of the scans to get intensity values for. A value of 0
uses all scans
rtype: list
return: A list of intensity values.
"""
try:
return [ self.max_( scan[ 'intensityArray' ])
for scan in self.data[ 'scans' ]
if ( not level or ( scan[ 'msLevel' ] == level ))]
except ValueError:
return 0;
def minMz( self ):
"""
Returns the minimum mz value in the data.
rtype: float
return: A float containing the min mz in the data.
"""
return min([ x[ 'mzRange' ][ 0 ] for x in self.data[ 'scans' ]])
def maxMz( self ):
"""
Returns the maximum mz value in the data.
rtype: float
return: A float containing the max mz in the data.
"""
return ( max([ x[ 'mzRange' ][ 1 ] for x in self.data[ 'scans' ]]))
def max_( self, sequence ):
if len( sequence ):
return max( sequence )
else:
return 0;
def read( self, filename ):
"""
Load a file into this reference. This method will automatically detect the
file type based on the file extension.
:Parameters:
filename : str
The name of the file to load.
"""
if not os.path.exists( filename ):
raise IOError( "The file %s does not exist or is not readable" % filename )
if filename.lower( ).endswith( ".csv" ):
return self.readCsv( filename )
elif filename.lower( ).endswith( ".mzdata" ) or filename.endswith( ".mzdata.xml" ):
return self.readMzData( filename )
elif filename.lower( ).endswith( ".mzxml" ):
return self.readMzXml( filename )
elif filename.lower( ).endswith( ".mzml" ):
return self.readMzMl( filename )
elif filename.lower( ).endswith( ".json" ):
return self.readJson( filename )
elif filename.lower( ).endswith( ".json.gz" ):
return self.readJsonGz( filename )
else:
sys.stderr.write( "Unrecognized file type for %s\n" % filename )
return False
    def readCsv( self, filename ):
        """
        Read a file in Agilent csv format.
        :Parameters:
            filename : str
                The name of the file to load.
        return: True on success, False on read/parse failure.
        """
        self.data = { "scans" : [] }
        try:
            f = open( filename, 'r' )
            lines = f.readlines( )
            f.close( )
        except IOError:
            sys.stderr.write( "Error: unable to read file '%s'\n" % filename )
            return False
        i = 0
        # scan forward for the "file name," metadata row
        while( i < len( lines ) and lines[ i ][ :10 ] != "file name," ):
            i+= 1
        self.data[ 'sourceFile' ] = lines[ i ].split( ',' )[ 1 ]
        # skip ahead to the "[spectra]" section header; the data rows start
        # on the line after it
        while ( i < len( lines ) and lines[ i ][ :9 ] != "[spectra]" ):
            i+=1
        i+=1
        if ( i > len( lines ) ):
            sys.stderr.write( "Unable to parse the reference file '%s'\n" % filename )
            return False
        scanId = 0
        for line in lines[ i: ]:
            scanId += 1
            values = line.split( ',' )
            # column 4 holds the polarity sign
            if values[ 4 ] == '-':
                polarity = -1
            else:
                polarity = 1
            rt = float( values[ 0 ])
            count = float( values[ 6 ])
            # mass and intensity values are interleaved from column 7 on:
            # x1, y1, x2, y2, ...
            intensityValues = [ float( x ) for x in values[ 8:-1:2 ] ]
            massValues = [ float( y ) for y in values[ 7:-1:2 ] ]
            self.data[ "scans" ].append({
                "retentionTime" : rt,
                "polarity" : polarity,
                "msLevel" : 1,
                "id" : scanId,
                "mzRange" : [ min( massValues ), max( massValues ) ],
                "parentScan" : None,
                "precursorMz" : None,
                "collisionEnergy" : None,
                "mzArray" : massValues,
                "intensityArray" : intensityValues
            })
        return True
    def readMzData( self, filename ):
        """
        Read a file in mzData format.
        :Parameters:
            filename : str
                The name of the file to load.
        return: True on success.
        """
        self.data = { "scans" : [] }
        dataFile = parse( filename )
        sourceFileNode = dataFile.getElementsByTagName( 'sourceFile' )[ 0 ].\
            getElementsByTagName( 'nameOfFile' )[ 0 ]
        # strip the XML tags to keep only the node's text content
        self.data[ 'sourceFile' ] = re.sub( "<.*?>", "", sourceFileNode.toxml( ))
        scans = dataFile.getElementsByTagName( 'spectrum' )
        for scan in scans:
            parentScan = None
            precursorMz = None
            collisionEnergy = None
            scanId = int( scan.getAttribute( 'id' ))
            spectrumInstrument = scan.getElementsByTagName( 'spectrumInstrument' )[ 0 ]
            msLevel = int( spectrumInstrument.getAttribute( 'msLevel' ))
            lowMz = float( spectrumInstrument.getAttribute( 'mzRangeStart' ))
            highMz = float( spectrumInstrument.getAttribute( 'mzRangeStop' ))
            params = spectrumInstrument.getElementsByTagName( 'cvParam' )
            # NOTE(review): rt and polarity are only bound when the matching
            # cvParam entries are present; a file missing either would raise
            # NameError below -- confirm inputs always carry both.
            for param in params:
                if param.getAttribute( 'name' ) == 'Polarity':
                    if param.getAttribute( 'value' ) == 'positive':
                        polarity = 1
                    else:
                        polarity = -1
                if param.getAttribute( 'name' ) == 'TimeInMinutes':
                    rt = float( param.getAttribute( 'value' ))
            massValues = self._unpackMzData(
                scan.getElementsByTagName( 'mzArrayBinary' )[ 0 ].getElementsByTagName( 'data' )[ 0 ])
            intensityValues = self._unpackMzData(
                scan.getElementsByTagName( 'intenArrayBinary' )[ 0 ].getElementsByTagName( 'data' )[ 0 ])
            precursors = scan.getElementsByTagName( 'precursor' )
            # only the first precursor (if any) is examined
            for precursor in precursors[ 0:1 ]:
                parentScan = int( precursor.getAttribute( 'spectrumRef' ))
                cvParams = precursor.getElementsByTagName( 'cvParam' )
                for param in cvParams:
                    if param.getAttribute( 'name' ) == 'MassToChargeRatio':
                        precursorMz = float( param.getAttribute( 'value' ))
#                    if param.getAttribute( 'name' ) == 'ChargeState':
#                        chargeState = int( param.getAttribute( 'value' ))
                    if param.getAttribute( 'name' ) == 'CollisionEnergy':
                        collisionEnergy = float( param.getAttribute( 'value' ))
            self.data[ "scans" ].append({
                "retentionTime" : rt,
                "polarity" : polarity,
                "msLevel" : msLevel,
                "id" : scanId,
                "mzRange" : [ lowMz, highMz ],
                "parentScan" : parentScan,
                "precursorMz" : precursorMz,
                "collisionEnergy" : collisionEnergy,
                "mzArray" : list( massValues ),
                "intensityArray" : list( intensityValues )
            })
        return True
def _unpackMzData( self, dataNode ):
"""
Internal function. Unpacks the scan data contained in a <data> node in mzdata
format.
:Parameters:
dataNode : xmlNode
The xml node containing the scan data to be unpacked.
"""
scanSize = int( dataNode.getAttribute( 'length' ))
if not scanSize:
return []
# else
if dataNode.getAttribute( 'endian' ) == 'little':
byteOrder = '<'
else:
byteOrder = '>'
if dataNode.getAttribute( 'precision' ) == '64':
dataType = 'd'
else:
dataType = 'f'
return struct.unpack( byteOrder + ( dataType * scanSize ),
b64decode( re.sub( "<.*?>", "", dataNode.toxml( ))))
    def readMzXml( self, filename ):
        """
        Read a file in mzXML format.
        :Parameters:
            filename : str
                The name of the file to load.
        return: True on success.
        """
        self.data = { "scans" : [] }
        dataFile = parse( filename )
        self.data[ 'sourceFile' ] = dataFile.getElementsByTagName( 'parentFile' )[ 0 ].\
            getAttribute( 'fileName' )
        scans = dataFile.getElementsByTagName( 'scan' )
        for scan in scans:
            collisionEnergy = None
            precursorMz = None
            msLevel = int( scan.getAttribute( "msLevel" ))
            scanSize = int( scan.getAttribute( 'peaksCount' ))
            # retentionTime is encoded as e.g. "PT123.4S" -- strip the
            # "PT"/"S" wrapper and convert seconds to minutes
            rt = float( scan.getAttribute( 'retentionTime' )[ 2:-1 ] ) / 60
            scanId = int( scan.getAttribute( 'num' ))
            lowMz = float( scan.getAttribute( 'lowMz' ))
            highMz = float( scan.getAttribute( 'highMz' ))
            if ( scan.getAttribute( 'polarity' ) == '+' ):
                polarity = 1
            else:
                polarity = -1
            if msLevel == 1:
                parentScan = None
            else:
                # MS^n scans are nested inside their parent <scan> element
                parentScan = int( scan.parentNode.getAttribute( 'num' ))
            if ( scan.getAttribute( 'collisionEnergy' )):
                collisionEnergy = float( scan.getAttribute( 'collisionEnergy' ))
            precursorTags = scan.getElementsByTagName( 'precursorMz' )
            if ( len( precursorTags )):
                precursorMz = float( re.sub( r"<.*?>", "", precursorTags[ 0 ].toxml( )).strip( ))
            # walk the children until the <peaks> element is found
            peaks = scan.firstChild
            while not ( peaks.nodeType == peaks.ELEMENT_NODE and peaks.tagName == 'peaks' ):
                peaks = peaks.nextSibling
            # NOTE: `type` shadows the builtin here; it holds the struct
            # format code ('d' for 64-bit, 'f' for 32-bit floats)
            if peaks.getAttribute( 'precision' ) == '64':
                type = 'd'
            else:
                type='f'
            byteOrder = '>'
            # get all of the text (non-tag) content of peaks
            packedData = re.sub( r"<.*?>", "", peaks.toxml( )).strip( )
            if not scanSize:
                massValues = []
                intensityValues = []
            else:
                if ( peaks.getAttribute( 'compressionType' ) == 'zlib' ):
                    data = struct.unpack( byteOrder + ( type * scanSize * 2 ), zlib.decompress( b64decode( packedData )))
                else:
                    data = struct.unpack( byteOrder + ( type * scanSize * 2 ), b64decode( packedData ))
                # m/z and intensity values are interleaved in the payload
                massValues = data[ 0::2 ]
                intensityValues = data[ 1::2 ]
            self.data[ "scans" ].append({
                "retentionTime" : rt,
                "polarity" : polarity,
                "msLevel" : msLevel,
                "id" : scanId,
                "mzRange" : [ lowMz, highMz ],
                "parentScan" : parentScan,
                "precursorMz" : precursorMz,
                "collisionEnergy" : collisionEnergy,
                "mzArray" : list( massValues ),
                "intensityArray" : list( intensityValues )
            })
        return True
    def readMzMl( self, filename ):
        """Read a file in mzML format (not implemented yet)."""
        raise NotImplementedError(
            "Reading from this file type has not yet been implemented." )
def _getChildNode( self, node, child ):
"""
Internal function. Finds the child node of the passed in xml node with the
given tag name.
:Parameters:
node : minidom node
A minidom node object
child : str
A string containing the tag name of the child node to return.
rtype: minidom node
return: The requested child of the minidom node.
"""
returnvalue = node.firstChild
while returnvalue and not (
returnvalue.nodeType == returnvalue.ELEMENT_NODE and
returnvalue.tagName == child ):
returnvalue = returnvalue.nextSibling
return returnvalue
    def readJson( self, filename ):
        """
        Reads ms data from a file containing plain (uncompressed) JSON data.
        No checks are done, so make sure the data is of the same format as
        that produced by this library, otherwise, unpredictable things may
        happen. This method may not be supported on versions of python prior
        to 2.5.
        :Parameters:
            filename : string
                The name of a file containing JSON data
        """
        # `json` is False when neither json nor simplejson could be imported
        if not json:
            raise NotImplementedError( "This method is not supported in your version of Python" )
        in_ = open( filename, 'r' )
        self.data = json.load( in_ )
        in_.close( )
        return True
    def readJsonGz( self, filename ):
        """
        Reads ms data from a file containing gzipped JSON data. No checks are done,
        so make sure the data is of the same format as that produced by this
        library, otherwise, unpredictable things may happen. This method may not be
        supported on versions of python prior to 2.5.
        :Parameters:
            filename : string
                The name of a file containing gzip compressed JSON data
        """
        # `json` is False when neither json nor simplejson could be imported
        if not json:
            raise NotImplementedError( "This method is not supported in your version of Python" )
        # gzip.open with 'r' opens in binary mode
        in_ = gzip.open( filename, 'r' )
        self.data = json.load( in_ )
        in_.close( )
        return True
def write( self, filename ):
"""
Load a file into this reference. This method will automatically detect the
file type based on the file extension.
:Parameters:
filename : str
The name of the file to load.
"""
if filename.lower( ).endswith( ".csv" ):
return self.writeCsv( filename )
elif ( filename.lower( ).endswith( ".mzdata" ) or
filename.lower( ).endswith( ".mzdata.xml" )):
return self.writeMzData( filename )
elif filename.lower( ).endswith( ".mzxml" ):
return self.writeMzXml( filename )
elif filename.lower( ).endswith( ".mzml" ):
return self.writeMzMl( filename )
elif filename.lower( ).endswith( ".json" ):
return self.writeJson( filename )
elif filename.lower( ).endswith( ".json.gz" ):
return self.writeJsonGz( filename )
else:
sys.stderr.write( "Unrecognized file type for %s\n" % filename )
return False
def writeCsv( self, filename ):
"""
:Parameters:
filename : string
The name of the file to write to.
rtype: bool
return: True if the write succeeded
"""
out = open( filename, 'w' )
out.write( "[data source]\n" )
out.write( "file name,%s\n" % self.data[ 'sourceFile' ] )
out.write( "[filters]\n" )
out.write( "mass range,%f,%f\n" % ( self.minMz( ), self.maxMz( )))
rtList = [ x['retentionTime'] for x in self.data['scans']]
out.write( "time range,%f,%f\n" % ( min( rtList ), max( rtList )))
out.write( "number of spectra,%d\n" % len( self.data['scans'] ))
out.write( "[format]\n" )
out.write( "retention time, sample, period, experiment, polarity, scan type, points, x1, y1, x2, y2, ...\n" )
out.write( "[spectra]\n" )
level2 = False
for scan in self.data[ 'scans' ]:
if ( scan[ 'msLevel' ] > 1 ):
if not level2:
print( "Agilent CSV format does not support multimensional data, ignoring scans with level > 1" )
level2 = True
continue
if ( scan[ 'polarity' ] > 0 ):
polarity = '+'
else:
polarity = '-'
out.write( "%f,%d,%d,%d,%s,%s,%d" %
( scan[ 'retentionTime' ], 1, 1, 1, polarity, "peak",
len( scan[ 'mzArray' ])))
for point in zip( scan[ 'mzArray' ], scan[ 'intensityArray' ]):
out.write( ',%f,%f' % point )
out.write( "\n" )
out.close( )
    def writeMzData( self, filename ):
        """
        Write the data in mzData 1.05 format.  The XML boilerplate is
        emitted literally; only the per-spectrum section varies with the
        data.
        :Parameters:
            filename : string
                The name of the file to write to.
        """
        out = open( filename, 'w' );
        out.write( '<?xml version="1.0" encoding="UTF-8"?>\n' )
        out.write( '<mzData version="1.05" accessionNumber="psi-ms:100" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n' )
        out.write( ' <cvLookup cdLabel="psi" fullName="The PSI Ontology" version="1.00" address="http://psidev.sourceforge.net/ontology" />\n' )
        out.write( ' <description>\n' )
        out.write( ' <admin>\n' )
        out.write( ' <sampleName/>\n' )
        out.write( ' <sampleDescription comment="" />\n' )
        out.write( ' <sourceFile>\n' )
        out.write( ' <nameOfFile />\n' )
        out.write( ' <pathToFile />\n' )
        out.write( ' </sourceFile>\n' )
        out.write( ' <contact>\n' )
        out.write( ' <name />\n' )
        out.write( ' <institution />\n' )
        out.write( ' <contactInfo />\n' )
        out.write( ' </contact>\n' )
        out.write( ' </admin>\n' )
        out.write( ' <instrument>\n' )
        out.write( ' <instrumentName />\n' )
        out.write( ' <source />\n' )
        out.write( ' <analyzerList count="1">\n' )
        out.write( ' <analyzer>\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000010" name="AnalyzerType" value="unknown" />\n' )
        out.write( ' </analyzer>\n' )
        out.write( ' </analyzerList>\n' )
        out.write( ' <detector>\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000026" name="DetectorType" value="unknown" />\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000029" name="SamplingFrequency" value="unknown" />\n' )
        out.write( ' </detector>\n' )
        out.write( ' <additional />\n' )
        out.write( ' </instrument>\n' )
        out.write( ' <dataProcessing>\n' )
        out.write( ' <software completionTime="">\n' )
        out.write( ' <name>pymzlib, Version=%s</name>\n' % VERSION )
        out.write( ' <version>%s</version>\n' % VERSION )
        out.write( ' <comments />\n' )
        out.write( ' </software>\n' )
        out.write( ' <processingMethod>\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000033" name="deisotoped" value="unknown" />\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000034" name="chargeDeconvolved" value="unknown" />\n' )
        out.write( ' <cvParam cvLabel="psi" accession="PSI:1000035" name="peakProcessing" value="unknown" />\n' )
        out.write( ' </processingMethod>\n' )
        out.write( ' </dataProcessing>\n' )
        out.write( ' </description>\n' )
        out.write( ' <spectrumList count="%d">\n' % len( self.data[ 'scans' ]))
        # one <spectrum> element per scan
        for scan in self.data[ 'scans' ]:
            if ( scan[ 'polarity' ] > 0 ):
                polarity = 'Positive'
            else:
                polarity = 'Negative'
            out.write( ' <spectrum id="%d">\n' % scan[ 'id' ])
            out.write( ' <spectrumDesc>\n' )
            out.write( ' <spectrumSettings>\n' )
            out.write( ' <acqSpecification spectrumType="unknown" methodOfCombination="unknown" count="1">\n' )
            out.write( ' <acquisition number="%d" />\n' % scan[ 'id' ])
            out.write( ' </acqSpecification>\n' )
            out.write( ' <spectrumInstrument msLevel="%d" mzRangeStart="%f" mzRangeStop="%f">\n' % ( scan[ 'msLevel' ], scan[ 'mzRange' ][ 0 ], scan[ 'mzRange' ][ 1 ]))
            out.write( ' <cvParam cvLabel="psi" accession="PSI:1000036" name="ScanMode" value="Scan" />\n' )
            out.write( ' <cvParam cvLabel="psi" accession="PSI:1000037" name="Polarity" value="%s" />\n' % polarity )
            out.write( ' <cvParam cvLabel="psi" accession="PSI:1000038" name="TimeInMinutes" value="%f" />\n' % scan[ 'retentionTime' ])
            out.write( ' </spectrumInstrument>\n' )
            out.write( ' </spectrumSettings>\n' )
            out.write( ' </spectrumDesc>\n' )
            out.write( ' <mzArrayBinary>\n' )
            # both arrays are packed little-endian 64-bit floats, base64'd
            dataLen = len( scan[ 'mzArray' ])
            out.write( ' <data precision="64" endian="little" length="%d">%s</data>\n' % ( dataLen, b64encode( struct.pack( '<' + ( 'd' * dataLen ), *scan[ 'mzArray' ]))))
            out.write( ' </mzArrayBinary>\n' )
            out.write( ' <intenArrayBinary>\n' )
            out.write( ' <data precision="64" endian="little" length="%d">%s</data>\n' % ( dataLen, b64encode( struct.pack( '<' + ( 'd' * dataLen ), *scan[ 'intensityArray' ]))))
            out.write( ' </intenArrayBinary>\n' )
            out.write( ' </spectrum>\n' )
        out.write( ' </spectrumList>\n' )
        out.write( '</mzData>\n' )
    def writeMzXML( self, filename ):
        """Write a file in mzXML format (not implemented yet)."""
        raise NotImplementedError(
            "Writing to this file type has not yet been implemented." )
    def writeMzML( self, filename ):
        """Write a file in mzML format (not implemented yet)."""
        raise NotImplementedError(
            "Writing to this file type has not yet been implemented." )
def writeJson(self, filename, indent=None):
    """
    Dumps the scan data (self.data) to a JSON file.
    :Parameters:
        filename : string
            The name of the file to write to.
        indent : int
            Level to indent for pretty-printing, or None for no pretty-print.
            Defaults to None
    :Raises: NotImplementedError when the json module is unavailable
        (it is imported conditionally at the top of this file).
    """
    if not json:
        raise NotImplementedError("This method is not supported in your version of Python")
    # Compact separators when not pretty-printing keeps the output small.
    if indent:
        sep = (', ', ': ')
    else:
        sep = (',', ':')
    # "with" guarantees the file is closed even if serialization fails.
    with open(filename, 'w') as out:
        json.dump(self.data, out, indent=indent, separators=sep)
def writeJsonGz(self, filename, indent=None, compressionLevel=6):
    """
    Dumps the scan data (self.data) to a JSON array, compressed with zlib.
    :Parameters:
        filename : string
            The name of the file to write to.
        indent : int
            Level to indent for pretty-printing, or None for no pretty-print.
            Defaults to None
        compressionLevel : int
            Compression level to use - 0 for least compression, 9 for most.
            Defaults to 6.
    :Raises: NotImplementedError when the json module is unavailable.
    """
    # Guard kept consistent with writeJson: json is imported conditionally
    # at the top of this file and may be None on very old interpreters.
    if not json:
        raise NotImplementedError("This method is not supported in your version of Python")
    if indent:
        sep = (', ', ': ')
    else:
        sep = (',', ':')
    # gzip streams are binary; json.dumps returns text (ASCII by default,
    # so encoding is safe on Python 2 as well as Python 3).
    payload = json.dumps(self.data, indent=indent, separators=sep).encode('utf-8')
    # "with" guarantees the gzip stream is flushed and closed on error.
    with gzip.open(filename, 'wb', compressionLevel) as out:
        out.write(payload)
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,302
|
jaspringer/pymzlib
|
refs/heads/master
|
/mzplot_cgi.py
|
#!/usr/bin/env python
"""
Copyright: 2010 Thomas McGrew
License: MIT license.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
VERSION = "0.2.1"
import cgi
import sys
import os
import re
from base64 import b64encode
from time import sleep
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
# Render tracebacks as HTML so CGI failures are debuggable in the browser.
import cgitb; cgitb.enable( )
# local imports
# NOTE(review): HOME is overridden before importing mzplot (which imports
# matplotlib) — presumably so matplotlib can write its config/cache under a
# writable directory when run as the web server user; confirm.
os.environ[ 'HOME' ] = "/tmp"
import mzplot
# Root directory that client-supplied file paths are resolved against.
DATA_ROOT = "/var/mzplot"
# Rendered images are cached here, keyed by a hash of options + file list.
CACHE_DIR = "/tmp/mzplot"
class Options( object ):
    """Rendering options for mzplot, pre-loaded with the same defaults the
    mzplot command line uses.  main() copies CGI form fields onto an
    instance of this class before rendering."""

    # (name, default) pairs; kept in this exact order because hash()
    # fingerprints str(self.__dict__), which is insertion-order sensitive.
    _DEFAULTS = (
        ( 'minTime', 0 ),
        ( 'maxTime', 0 ),
        ( 'bpc', False ),
        ( 'mass', 0 ),
        ( 'massWindow', 0.2 ),
        ( 'connectPeaks', False ),
        ( 'showLegend', True ),
        ( 'shortFilename', True ),
        ( 'massLabels', False ),
        ( 'showPeaks', True ),
        ( 'showNoise', False ),
        ( 'markerAlpha', 1 ),
        ( 'lineWidth', 1 ),
        ( 'scriptMode', True ),
        ( 'verbosity', 0 ),
        ( 'removeNoise', False ),
        ( 'outputFile', None ),
        ( 'width', 800 ),
        ( 'height', 450 ),
        ( 'dpi', 72 ),
        ( 'filterLevel', 0 ),
        ( 'normalize', False ),
    )

    def __init__( self ):
        for name, value in self._DEFAULTS:
            setattr( self, name, value )

    def hash( self ):
        """Return a SHA-1 digest fingerprinting the current option values;
        used as part of the image cache key."""
        return sha1( str( self.__dict__ )).digest( )
def main( ):
    """CGI entry point: parse form fields into Options, render (or reuse a
    cached) plot image via mzplot, and stream it back as image/png.
    Python 2 only (uses print statements and FieldStorage.has_key)."""
    options = Options( )
    # Lower scheduling priority: rendering is CPU-heavy on a shared host.
    os.nice( 10 )
    form = cgi.FieldStorage( )
    # Copy every form field except 'files' onto the Options object,
    # coercing to float or boolean where possible.
    for i in form.keys( ):
        if not i == "files":
            value = None
            try:
                value = float( form[ i ].value )
            except ValueError:
                if form[ i ].value.lower( ) == "true":
                    value = True
                elif form[ i ].value.lower( ) == "false":
                    value = False
            # NOTE(review): falsy values (False, 0, 0.0) are discarded here,
            # so a client cannot override a truthy default such as
            # showLegend — looks unintended; confirm.
            if ( value ):
                options.__setattr__( i, value )
    files = []
    if form.has_key( 'files' ):
        for file_ in re.split( "[,|]", form[ 'files' ].value ):
            # Reject path traversal before touching the filesystem.
            # NOTE(review): only "../" is blocked; other escapes from
            # DATA_ROOT (e.g. absolute-looking segments) are not — verify.
            if ( "../" in file_ ):
                print "Content-Type: text/plain"
                print
                print "Invalid path: " + file_
                return
            files.append( DATA_ROOT + "/" + file_ )
    if not os.path.exists( CACHE_DIR ):
        os.makedirs( CACHE_DIR )
    # Cache key = hash of option values + sorted file list, so identical
    # requests reuse the previously rendered image.
    fileHash = sha1( )
    fileHash.update( str( sorted( files )))
    options.outputFile = ( CACHE_DIR + "/" +
        b64encode( options.hash( ) + fileHash.digest( ), "_.") +
        ".png" ).replace( "=", "" )
    # A lock file keeps two requests from rendering the same plot at once.
    lockfile = options.outputFile + ".lock"
    if not os.path.exists( options.outputFile ):
        if not os.path.exists( lockfile ):
            f = open( lockfile, 'w' )
            f.write( str( os.getpid( )))
            f.close( )
            try:
                mzplot.main( options, files )
            finally:
                # Always release the lock, even if rendering raised.
                os.remove( lockfile )
        else:
            # Another process is rendering; poll until it finishes or we
            # give up after two minutes.
            wait_time = 2 #seconds
            total_wait = 120 #seconds
            while total_wait > 0 and os.path.exists( lockfile ):
                sleep( wait_time )
                total_wait -= wait_time
    # headers
    print "Content-Type: image/png"
    if os.path.exists( options.outputFile ):
        imageData = open( options.outputFile, 'r' )
        imageBytes = imageData.read( )
        imageData.close( )
    else:
        # Rendering failed or timed out: respond with an empty body.
        imageBytes = str( )
    print "Content-Length: %d" % len( imageBytes )
    # arbitrary expiration date way in the future
    # essentially "cache this as long as you want."
    # print "Expires: Sun, 31 Dec 2006 16:00:00 GMT"
    print "Expires: Sun, 31 Dec 2034 16:00:00 GMT"
    print
    # end headers
    sys.stdout.write( imageBytes )
# run the main method if this file isn't being imported.
if ( __name__ == "__main__" ):
    main( )
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,303
|
jaspringer/pymzlib
|
refs/heads/master
|
/mzplot.py
|
#!/usr/bin/env python
"""
Copyright: 2010 Thomas McGrew
License: MIT license.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
VERSION = "0.2.1.2012.02.27"
import sys
import os
import os.path
#from hashlib import sha1
#import cPickle
from optparse import OptionParser,SUPPRESS_HELP
import numpy as numerical
import math
import mzlib
from csv import DictReader
try:
import filters
except ImportError:
filters = None
try:
import matplotlib
from matplotlib.font_manager import FontProperties
except ImportError:
sys.stderr.write( "\n*** THIS PROGRAM REQUIRES MATPLOTLIB, WHICH DOES NOT "
"APPEAR TO BE INSTALLED ***\n" )
raise
# To do:
# add an option to allow indexing by scan number
# Per-series matplotlib color cycles; main() rotates each list after use so
# successive plots of the same kind get distinct colors.
COLORS = {
    # Available colors (the lower case letter is used for that color):
    # rED, gREEN, bLUE, cYAN, mAGENTA, yELLOW, BLACk, wHITE
    # html color codes can also be used.
    "intensity" : [ "b","r","y","c","m","g" ],
    "noise" : [ "b","r","y","c","m","g" ],
    "ref" : [ "k","c","m","y","r","g","b"]
}
def parseOpts( ):
    """Build the mzplot command line interface and parse sys.argv.

    :return: (options, args) as produced by OptionParser.parse_args;
        the --hpf/--lpf options exist only when the optional filters
        module imported successfully.
    """
    parser = OptionParser( version="%prog " + VERSION )
    parser.add_option( "--minrt", type="float", default=0, dest="minTime",
        metavar="RT",
        help="The minimum retention time to show on the graph" )
    parser.add_option( "--maxrt", type="float", default=0, dest="maxTime",
        metavar="RT",
        help="The maximum retention time to show on the graph" )
    parser.add_option( "--bpc", action="store_true", default=False, dest="bpc",
        help="Show only base peaks, i.e. the highest intensity value at each "
             "retention time (BPC) instead of the total ion chromatogram (TIC)." )
    parser.add_option( "-m", "--mass", type="float", dest="mass",
        help="Filter data by a particular mass, a.k.a Selected Ion "
             "Chromatogram (SIC)" )
    parser.add_option( "-w", "--mass-window", type="float", dest="massWindow",
        default=0.2, metavar="SIZE",
        help="The range of the mass to be displayed in Dalton. Default is "
             "%default. This option is only used with -m (SIC mode)" )
    parser.add_option( "-c", "--connect-peaks", action="store_true",
        dest="connectPeaks",
        help="Draw lines connecting the peaks from Xmass output" )
    parser.add_option( "-l", "--legend", action="store_true", default=False,
        dest="showLegend", help="Display a legend." )
    parser.add_option( "--short-filename", action="store_true", default=False,
        dest="shortFilename",
        help="Display only the filename (without the path) in the legend." )
    parser.add_option( "--labels", action="store_true", dest="massLabels",
        help="Show a label for each peak containing its mass" )
    parser.add_option( "--hide-peaks", action="store_false", default=True,
        dest="showPeaks",
        help="Do not show the peak bars from the xmass output on the graph" )
    parser.add_option( "-n", "--noise", action="store_true", default=False,
        dest="showNoise",
        help="Show the noise from the xmass output on the graph" )
    parser.add_option( "--alpha", type="float", default=1, dest="markerAlpha",
        metavar="ALPHA",
        help="Set the starting opacity level of the lines on the graph "
             "(0.0-1.0, Defaults to %default)" )
    parser.add_option( "--line-width", type="float", default=1,
        dest="lineWidth", metavar="WIDTH",
        help="Set the width of the bars on the graph. Defaults to %default" )
    parser.add_option( "-s", "--script", action="store_true",
        dest="scriptMode",
        help="Run in script mode, i.e. do not display the graph. This is "
             "only useful with the -o option" )
    parser.add_option( "-v", "--verbose", action="count", dest="verbosity",
        default=0,
        help="Print more messages about what the program is doing." )
    parser.add_option( "--subtract-noise", action="store_true",
        dest="removeNoise",
        help="Subtract the noise from the  intensity values in the peak list" )
    parser.add_option( "-o", "--out", dest="outputFile", metavar="FILE",
        help="Save the generated graph to the given file. Supported types "
             "depends on your platform, but most platforms support png, pdf, "
             "ps, eps and svg." )
    parser.add_option( "--width", type="int", default=800, dest="width",
        help="The width of the generated image. Defaults to %default. "
             "For use with -o" )
    parser.add_option( "--height", type="int", default=450, dest="height",
        help="The height of the generated image. Defaults to %default. "
             "For use with -o" )
    parser.add_option( "--dpi", type="int", default=72, dest="dpi",
        help="The dpi of the generated image. Defaults to %default. "
             "For use with -o" )
    # parser.add_option( "--by-scan", action="store_true", dest="byScan",
    #     help="Index the graph by scan number instead of retention time" )
    # FFT filter options only make sense when the filters module is present.
    if filters:
        parser.add_option( "--hpf", type="float", dest="hpfThreshold",
            metavar="THRESHOLD",
            help="Run any chromatogram data through an fft high pass filter "
                 "before displaying. hpfThreshold should be a value between "
                 "0 and 1" )
        parser.add_option( "--lpf", type="float", dest="lpfThreshold",
            metavar="THRESHOLD",
            help="Run any chromatogram data through an fft low pass filter "
                 "before displaying. lpfThreshold should be a value between "
                 "0 and 1" )
    parser.add_option( "--snratio", type="float", default=0,
        dest="filterLevel", metavar="RATIO",
        help="Drop peaks whose signal/noise ratio is less than RATIO" )
    parser.add_option( "--normalize", action="store_true", dest="normalize",
        help="Normalize all plots to have a maximum value of 1" )
    return parser.parse_args( )
def main( options=None, args=None ):
    """Render chromatogram plots with matplotlib/pylab.

    :Parameters:
        options : Options-like object
            Parsed option values (see parseOpts).  Despite the None default,
            every branch dereferences it, so a real object is required.
        args : list of string
            Input paths.  Files with raw-data extensions (csv/mzdata/mzxml/
            json/json.gz) are plotted as chromatograms via mzlib; the
            remaining files are read as OmicsDP peak-list CSVs.
    """
    global COLORS
    # Agg backend renders without a display; must be selected before pylab.
    if options.scriptMode:
        matplotlib.use( 'Agg' )
    import pylab
    thisFigure = pylab.figure( )
    if options.outputFile:
        # Convert pixel dimensions into inches for matplotlib.
        thisFigure.set_size_inches((
            float( options.width ) / options.dpi,
            float( options.height ) / options.dpi ))
        pylab.subplots_adjust( left = 0.6 / options.width * options.dpi,
            right = 1.0 - 0.2 / options.width * options.dpi,
            top = 1.0 - 0.45 / options.height * options.dpi,
            bottom = 0.5 / options.height * options.dpi )
    # NOTE(review): barOffset and barAlpha are computed but never used below.
    barOffset = options.lineWidth / 2
    barAlpha = options.markerAlpha * 2 / 3
    rawFiles = [ ]
    rawTypes = [ '.csv', '.mzdata', '.mzxml', '.mzxml.xml',
        '.json', '.json.gz' ]
    # Walk args backwards so popping by index is safe; move raw-data files
    # into rawFiles, leaving only peak-list files in args.
    for i in range( len( args )-1, -1, -1 ):
        arg = args[ i ]
        try:
            # check the extension to see if this is xmass input data
            for type_ in rawTypes:
                if arg.lower( ).endswith( type_ ):
                    rawFiles.append( args.pop( i ))
                    # NOTE(review): 'continue' here continues the *inner*
                    # extension loop after the element was already popped;
                    # 'break' looks intended.  The ValueError handler below
                    # also appears unreachable — confirm.
                    continue
        except ValueError:
            pass
    if rawFiles:
        for r in rawFiles:
            ref = mzlib.RawData( )
            if not ( ref.read( r )):
                sys.stderr.write( "Error: Unable to load data from '%s'" % r )
                sys.exit( -1 )
            if options.shortFilename:
                filename = os.path.basename( r )
            else:
                filename = r
            # apply any filters
            if options.mass:
                ref.onlyMz( options.mass, options.massWindow )
            if options.maxTime or options.minTime:
                if options.maxTime:
                    ref.onlyScans( options.minTime, options.maxTime )
                else:
                    ref.onlyScans( options.minTime )
            # x axis: retention times of MS level 1 scans only.
            rt = [ scan[ "retentionTime" ] for scan in ref if scan[ "msLevel" ] == 1 ]
            if options.bpc:
                yAxis = ref.bpc( 1 )
            else:
                yAxis = ref.tic( 1 )
            # Optional FFT band/low/high pass filtering of the chromatogram.
            if filters:
                if options.lpfThreshold and options.hpfThreshold:
                    yAxis = filters.bpf( yAxis, options.hpfThreshold, options.lpfThreshold )
                elif options.lpfThreshold:
                    yAxis = filters.lpf( yAxis, options.lpfThreshold )
                elif options.hpfThreshold:
                    yAxis = filters.hpf( yAxis, options.hpfThreshold )
            if options.normalize:
                if len( yAxis ):
                    max_ = max( yAxis )
                    if max_:
                        yAxis = [ x / max_ for x in yAxis ]
            if options.normalize:
                label = filename + " (normalized)"
            else:
                label = filename
            pylab.plot( rt, yAxis, COLORS['ref'][0] , alpha = options.markerAlpha,
                linewidth=options.lineWidth,
                label = label )
            # Rotate the color cycle so the next reference plot differs.
            COLORS['ref'] = COLORS['ref'][1:] + [ COLORS['ref'][0]]
            # Interactive runs also print summary statistics per plot.
            if not options.scriptMode:
                def findMedian( sortedArray ):
                    # NOTE(review): for odd-length input this *multiplies*
                    # two middle elements; a median would be
                    # sortedArray[ arrayLen // 2 ] (and an average of the two
                    # middles for even length) — looks like a bug.
                    arrayLen = len( sortedArray )
                    if arrayLen % 2:
                        median = sortedArray[ arrayLen // 2 ] * sortedArray[ arrayLen // 2 + 1 ]
                    else:
                        median = sortedArray[ arrayLen // 2 ]
                    return median
                array = list( yAxis );
                array.sort( )
                median = findMedian( array )
                q1 = findMedian( array[ : len( array ) // 2 ])
                q3 = findMedian( array[ int( math.ceil( len( array ) / 2.0 )) : ])
                min_ = min( yAxis )
                max_ = max( yAxis )
                print( "Plot statistics for %s:" % label )
                print( "\tRange: %g (%g - %g)" % ( max_ - min_, min_, max_ ))
                print( "\tMean: %g" % numerical.mean( yAxis ))
                print( "\tMedian: %g" % median )
                print( "\tInterquartile Range: %g (%g - %g)" % ( q3 - q1, q1, q3 ))
                print( "\tStandard Deviation: %g" % numerical.std( yAxis ))
                print( "\tVariance: %g" % numerical.var( yAxis ))
    # The following section of code is specific to the OmicsDP data formats.
    # You can safely delete this section if you are using this software outside
    # of that environment.
    # BEGIN READING DLTs
    for arg in args:
        scan = numerical.empty( 0, numerical.uint64 ) # scan number
        barRt = numerical.empty( 0, numerical.float64 ) # retention time
        barIntensity = numerical.empty( 0, numerical.float64 )
        barNoise = numerical.empty( 0, numerical.float64 )
        labels = [ ]
        try:
            f = open( arg )
            lines = DictReader( f )
        except IOError:
            sys.stderr.write("Error: unable to read file '%s'\n" % arg )
            sys.exit( -1 )
        if options.shortFilename:
            filename = os.path.basename( arg )
        else:
            filename = arg
        for line in lines:
            try:
                scanValue = int( line[ 'Scan' ])
                rtValue = float( line[ 'RT(min)'] )
                mzValue = float( line[ 'M/Z' ] )
                noiseValue = float( line[ 'LC_Noise' ] )
                intValue = float( line[ 'Int' ] )
                # Retention-time window filter.
                if ( rtValue < options.minTime or
                    ( options.maxTime and rtValue > options.maxTime )):
                    continue
                # Drop rows failing the S/N ratio or the mass-window filter.
                if ((( not noiseValue ) or
                    intValue/noiseValue < options.filterLevel ) or
                    ( options.mass and
                    abs( options.mass - mzValue ) > options.massWindow )):
                    if options.verbosity:
                        sys.stderr.write( "Dropping line %s" % ( line ))
                    continue
                # using plot( ) produces a more responsive graph than vlines( )
                # Same scan as the previous row: merge into the open bar.
                if len( scan ) and scanValue == scan[ -1 ]:
                    if options.bpc:
                        # BPC keeps only the most intense peak of the scan.
                        if intValue > barIntensity[ -2 ]:
                            barIntensity[ -2 ] = intValue
                            barNoise[ -2 ] = noiseValue
                            labels[ -1 ] = "(%.2f," % ( mzValue - 0.005 ) #truncate, don't round
                    else:
                        # TIC sums all peaks of the scan.
                        barIntensity[ -2 ] += intValue
                        barNoise[ -2 ] += noiseValue
                        labels[ -1 ] += " %.2f," % ( mzValue - 0.005 ) #truncate, don't round
                else:
                    # appending [0, value, 0] allows us to plot a bar graph using lines
                    barRt = numerical.append( barRt, [ rtValue, rtValue, rtValue ])
                    barIntensity = numerical.append( barIntensity, [ 0, intValue, 0 ])
                    barNoise = numerical.append( barNoise, [ 0, noiseValue, 0 ])
                    scan = numerical.append( scan, scanValue )
                    if ( len( labels )):
                        labels[ -1 ] = labels[ -1 ][ :-1 ] + ')' # replace the last , with )
                    labels.append( "(%.2f," % ( mzValue - 0.005 )) #truncate, don't round
            except ( ValueError, IndexError ):
                # Malformed row (missing column or non-numeric field).
                if options.verbosity:
                    sys.stderr.write( "Skipping line %s" % ( line ))
        if ( len( labels )):
            labels[ -1 ] = labels[ -1 ][ :-1 ] + ')' # replace the last , with )
        if options.normalize:
            if len( barIntensity ):
                max_ = max( barIntensity )
                if max_:
                    barIntensity /= max_
                    barNoise /= max_
        if options.massLabels:
            # xrange: this module is Python 2 code.
            for i in xrange( len( labels )):
                pylab.annotate( labels[ i ], ( barRt[ 3 * i + 1 ], barIntensity[ 3 * i + 1 ]),
                    size=9)
        # calculate alpha based on which file this is in the list
        alpha = ( options.markerAlpha - options.markerAlpha *
            ( args.index( arg ) / float( len( args ))) * 0.75 )
        if options.showPeaks:
            if not options.removeNoise:
                barIntensity += barNoise
            # NOTE(review): "label = label = ..." is a harmless double
            # assignment (here and below).
            if options.normalize:
                label = label = ( "%s - intensity (%d peaks, normalized)" %
                    ( filename, len( barIntensity )/3))
            else:
                label = label = ( "%s - intensity (%d peaks)" %
                    ( filename, len( barIntensity )/3))
            pylab.plot( barRt, barIntensity, COLORS['intensity'][0] ,
                linewidth = options.lineWidth*2, alpha = alpha, label = label )
            if options.connectPeaks:
                # Connect the bar tips (every 3rd point holds the peak value).
                pylab.plot( barRt[ 2::3 ], barIntensity[ 1::3 ], COLORS['intensity'][0],
                    alpha = alpha, linewidth=options.lineWidth )
            COLORS['intensity'] = COLORS['intensity'][1:] + [ COLORS['intensity'][0]]
        if options.showNoise:
            if options.normalize:
                label = ( "%s - noise (%d points, normalized)" % ( filename, len( barNoise )/3))
            else:
                label = ( "%s - noise (%d points)" % ( filename, len( barNoise )/3))
            pylab.plot( barRt[ 2::3 ], barNoise[ 1::3 ], COLORS['noise'][0], alpha = alpha,
                linewidth=options.lineWidth, label = label)
            COLORS['noise'] = COLORS['noise'][1:] + [ COLORS['noise'][0]]
        if len( barRt ):
            #draw a horizontal black line at 0
            pylab.plot( [barRt[1], barRt[-2]], [0,0], 'k', linewidth=options.lineWidth )
        f.close( )
    # END READING DLTs
    if options.showLegend:
        legend = pylab.legend( loc="upper left", prop=FontProperties( size='small' ))
    pylab.grid( )
    axes = thisFigure.get_axes( )[ 0 ]
    axes.set_xlabel( "Time (min)" )
    axes.set_ylabel( "Intensity" )
    axes.ticklabel_format( style="scientific", axis="y", scilimits=(3,3) )
    # Title reflects the plot mode (peak list vs BPC/TIC/SIC).
    if not len( rawFiles ):
        if ( options.bpc ):
            axes.set_title( "Base Peaks" )
        else:
            axes.set_title( "Peaks" )
    elif options.bpc:
        if options.mass:
            axes.set_title(
                "Selected Base Peak Chromatogram (M/Z: %f, Tolerance: %f)" %
                ( options.mass, options.massWindow ))
        else:
            axes.set_title( "Base Peak Chromatogram" )
    else:
        if options.mass:
            axes.set_title(
                "Selected Ion Chromatogram (M/Z: %f, Tolerance: %f)" %
                ( options.mass, options.massWindow ))
        else:
            axes.set_title( "Total Ion Chromatogram" )
    if options.outputFile:
        thisFigure.savefig( options.outputFile, dpi=options.dpi )
    if not options.scriptMode:
        pylab.show( )
## end main( )
if ( __name__ == "__main__" ):
    main( *parseOpts( ))
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,304
|
jaspringer/pymzlib
|
refs/heads/master
|
/test.py
|
#!/usr/bin/env python2
# Smoke test: load the same tiny dataset in every supported format and
# verify that the parsed scans agree across formats.
from mzlib import *
# NOTE(review): these loads run at import time and require the testData/
# fixture files to exist relative to the working directory.
mzXML2 = RawData( "testData/tiny1.mzXML2.0.mzXML" )
mzXML3 = RawData( "testData/tiny1.mzXML3.0.mzXML" )
mzData = RawData( "testData/tiny1.mzData1.05.xml" )
json = RawData( "testData/tiny1.json" )  # NOTE(review): shadows the stdlib 'json' module name
jsonGz = RawData( "testData/tiny1.json.gz" )
if __name__ == "__main__":
    # Pairwise equality checks: every format should yield identical scans.
    print( "mzXML2.getScan( 5.89 )['points'] == mzXML3.getScan( 5.89 )['points']): " +
        str(mzXML2.getScan( 5.89 )['points'] == mzXML3.getScan( 5.89 )['points']))
    print( "mzXML2.getScan( 5.89 )['points'] == mzData.getScan( 5.89 )['points']): " +
        str(mzXML2.getScan( 5.89 )['points'] == mzData.getScan( 5.89 )['points']))
    print( "mzXML2[:] == mzXML3[:]: " + str(mzXML2[:] == mzXML3[:]))
    print( "mzXML2[:] == mzData[:]: " + str(mzXML2[:] == mzData[:]))
    print( "mzXML2[:] == json[:]  : " + str(mzXML2[:] == json[:] ))
    print( "mzXML2[:] == jsonGz[:]: " + str(mzXML2[:] == jsonGz[:]))
|
{"/mzconvert.py": ["/mzlib.py"], "/mzplot.py": ["/mzlib.py", "/filters.py"], "/test.py": ["/mzlib.py"]}
|
15,333
|
karthikvg/Twitter_Sentiment_Analysis
|
refs/heads/master
|
/helpers.py
|
def get_message(temp):
    """Extract the tweet text from a raw tweet JSON string.

    NOTE(review): the hard-coded offsets assume the exact layout of the raw
    stream payload ('text' key followed by 8 characters before the value),
    and a comma inside the text would truncate the result — confirm against
    real payloads.
    """
    start = temp.find("text") + 8
    end = temp[start:].find(",")
    return temp[start:start + end - 1]
def write_to_a_file(filename, data):
    """Write each item of *data* as its own line to *filename* (UTF-8)."""
    with open(filename, "w", encoding='utf-8') as out:
        out.writelines(str(item) + '\n' for item in data)
|
{"/stream_tweets.py": ["/credits.py", "/helpers.py"]}
|
15,334
|
karthikvg/Twitter_Sentiment_Analysis
|
refs/heads/master
|
/credits.py
|
# Twitter API credentials consumed by stream_tweets.TwitterAuthenticator.
# SECURITY(review): real-looking secrets are committed in plain text here.
# They should be revoked and loaded from environment variables or an
# untracked config file instead.
CONSUMER_KEY="1k92yCTO1Ihj0R5FujNNJmUbS"
CONSUMER_SECRET="hZtKpRq547ZifpRE56IWzsclsiYtF9QTHy8UkCtAG89kx5rvfT"
ACCESS_TOKEN="1039355503044845569-kmHjNNPvoeN6n5IszPLTRZa9zmuvSV"
ACCESS_SECRET="2wV2K6R7zgMM4QS30Pj4QhPZrkkVZXAsD0e457WO9e4D6"
|
{"/stream_tweets.py": ["/credits.py", "/helpers.py"]}
|
15,335
|
karthikvg/Twitter_Sentiment_Analysis
|
refs/heads/master
|
/stream_tweets.py
|
from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import Stream
from tweepy import OAuthHandler
from textblob import TextBlob
import re
import numpy as np
import pandas as pd
import credits
import matplotlib.pyplot as plt
import helpers
class TwitterClient:
    """Thin wrapper around the authenticated tweepy API for one user."""

    def __init__(self, user=None):  # Authenticates the user
        # Authenticate once and reuse the client for every request.
        self.auth = TwitterAuthenticator().authenticate()
        self.twitter_client = API(self.auth, wait_on_rate_limit=True)
        self.user = user

    def get_timeline_tweets(self, count):
        """Return up to *count* timeline tweets for the configured user id."""
        return [tweet for tweet in
                Cursor(self.twitter_client.user_timeline, id=self.user).items(count)]

    def get_friends(self, count):
        """Return up to *count* friends for the configured user id."""
        return [friend for friend in
                Cursor(self.twitter_client.friends, id=self.user).items(count)]

    def get_twitter_client_api(self):
        """Expose the underlying authenticated tweepy API client."""
        return self.twitter_client
# TwitterListener is used to get the data and also to handle the errors
class TwitterListener(StreamListener):
    """Stream listener that appends raw tweet payloads to a file."""

    def __init__(self, filename):
        # Let tweepy's StreamListener initialize its own state (it sets
        # self.api in its __init__); the original skipped this call.
        super().__init__()
        self.filename = filename

    def on_data(self, data):
        """Append the raw payload; returning True keeps the stream open."""
        # "with" closes the file even if the write fails.
        with open(self.filename, 'a') as writing:
            writing.write(data)
        return True

    def on_error(self, status):
        """Stop the stream (return False) on HTTP 420 rate limiting;
        otherwise just log the status and let tweepy continue."""
        if status == 420:
            print(status)
            return False
        print(status)
# TwitterAuthenticator is used to authenticate the user with credentials listed in credits.py
class TwitterAuthenticator:
    """Builds a tweepy OAuth handler from the credentials in credits.py."""

    def authenticate(self):
        """Return an OAuthHandler primed with the stored consumer/access tokens."""
        handler = OAuthHandler(credits.CONSUMER_KEY, credits.CONSUMER_SECRET)
        handler.set_access_token(credits.ACCESS_TOKEN, credits.ACCESS_SECRET)
        return handler
class TwitterStreamer:
    """Streams tweets that match a hash-tag list into a file."""

    def __init__(self):
        self.auth = TwitterAuthenticator()

    def stream_tweets(self, filename, hash_tag_list):
        """Stream tweets matching *hash_tag_list*, appending raw data to *filename*."""
        listener = TwitterListener(filename)
        auth = self.auth.authenticate()
        Stream(auth, listener).filter(track=hash_tag_list)
class TweetAnalyzer:
    """Utility methods for cleaning tweets, scoring sentiment and building
    a DataFrame of tweet metadata."""

    def clean_tweet(self, tweet):
        """Strip @mentions, URLs and non-alphanumeric characters, then
        collapse whitespace runs into single spaces."""
        stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet)
        return ' '.join(stripped.split())

    def analyze_tweet_sentiment(self, tweet):
        """Return 1 / -1 / 0 for positive / negative / neutral polarity."""
        polarity = TextBlob(self.clean_tweet(tweet)).sentiment.polarity
        if polarity > 0:
            return 1
        if polarity < 0:
            return -1
        return 0

    def tweets_to_data_frame(self, tweets):
        """Build a DataFrame with text, source, date, length and likes columns."""
        frame = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=["tweets"])
        frame['Source'] = np.array([tweet.source for tweet in tweets])
        frame['date'] = np.array([tweet.created_at for tweet in tweets])
        frame['len'] = np.array([len(tweet.text) for tweet in tweets])
        frame['likes'] = np.array([tweet.favorite_count for tweet in tweets])
        return frame
if __name__ == "__main__":
    # Demo: pull the latest 200 tweets from one account, score each one's
    # sentiment and print the resulting DataFrame.
    user = TwitterClient()
    api = user.get_twitter_client_api()
    tweets = api.user_timeline(screen_name='MelissaBenoist', count=200)
    analyzer_temp = TweetAnalyzer()
    data_frame = analyzer_temp.tweets_to_data_frame(tweets)
    # 1 = positive, -1 = negative, 0 = neutral (see TweetAnalyzer).
    data_frame['Sentiment'] = np.array([analyzer_temp.analyze_tweet_sentiment(tweet) for tweet in data_frame['tweets']])
    print(data_frame)
    ################################
    # print(dir(tweets[0]))
    # print(data_frame.head(5))
    # print(data_frame['likes'])
    # print(dir(tweets[0]))
    # print(np.max(data_frame['likes']))
    # time_likes = pd.Series(data=data_frame['len'].values*100, index=data_frame['date'])
    # time_likes.plot(figsize=(16, 4), label='len', legend=True)
    # time_likes = pd.Series(data=data_frame['likes'].values, index=data_frame['date'])
    # time_likes.plot(figsize=(16, 4), label='likes', legend=True)
    # plt.show()
    ######################################
    # filename="karthik.json"
    # hash_tag_list=["teradata"]
    # tweets=user.get_timeline_tweets(0)
    # friends=user.get_friends(0)
    # print("the no of tweets for the given account id",len(tweets),sep=" ")
    # print("the no of friends for the given account id",len(friends),sep=" ")
    # for friend in friends:
    #     print(friend)
    # helpers.write_to_a_file("tweets.json",tweets)
    # helpers.write_to_a_file("friends.txt",friends)
    # stream_tweets=TwitterStreamer()
    # stream_tweets.stream_tweets(filename,hash_tag_list)
|
{"/stream_tweets.py": ["/credits.py", "/helpers.py"]}
|
15,336
|
Wise-Economy/plantix
|
refs/heads/master
|
/depth_first_search.py
|
from typing import Text as String
from PlantixCommunityService import PlantixCommunityService
def depth_first_search_recursive(reachable_nodes: set, start: String) -> set:
    """
    Recursively collect every expert node reachable from *start* in the
    Plantix "following" graph.

    This is the recursive variant; Python performs no tail-call
    optimization, so deep graphs may hit the recursion limit.

    :param reachable_nodes: Accumulator set of visited uids; pass set()
        on the initial call.
    :param start: uid of the expert node to start the traversal from.
    :return: The accumulator, now containing every reachable uid.
    """
    # Guard clause: already-visited nodes end the recursion immediately.
    if start in reachable_nodes:
        return reachable_nodes
    reachable_nodes.add(start)
    expert = PlantixCommunityService().get(start)
    for followee in expert.following:
        depth_first_search_recursive(reachable_nodes, followee)
    return reachable_nodes
def depth_first_search_iterative(start: String) -> set:
    """
    Iteratively collect every expert node reachable from *start* in the
    Plantix "following" graph using an explicit stack.

    :param start: uid of the expert node to start the traversal from.
    :return: Set of uids reachable from the start node.
    """
    network = PlantixCommunityService()
    visited = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in visited:
            continue
        visited.add(node)
        # Push every followee; duplicates are filtered by the check above.
        pending.extend(network.get(node).following)
    return visited
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,337
|
Wise-Economy/plantix
|
refs/heads/master
|
/helper.py
|
from PlantixCommunityService import PlantixCommunityService
def generate_plant_experts_topic_count_dict(experts: set) -> dict:
    """
    Count, for each plant topic, how many of the given experts cover it.

    :param experts: Set of expert uids to examine.
    :return: Dict mapping each plant topic covered by the experts to the
        number of experts available for that topic.
    """
    # Experts are fetched from the shared in-memory community service.
    network = PlantixCommunityService()
    plant_topic_count_dict = {}
    for expert_id in experts:
        for plant_topic in network.get(expert_id).plants:
            # dict.get with a default replaces the original if/else branch.
            plant_topic_count_dict[plant_topic] = plant_topic_count_dict.get(plant_topic, 0) + 1
    # Leftover debug print removed; callers receive the dict directly.
    return plant_topic_count_dict
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,338
|
Wise-Economy/plantix
|
refs/heads/master
|
/plant_expert.py
|
from dataclasses import dataclass
from typing import Text as String, List
@dataclass
class PlantExpert(object):
    """
    Represents a plantix community expert.
    Each expert has a unique id, a list of plants
    they can give advice on and a list of other
    expert uid-s they follow.
    """
    # Unique identifier of this expert within the community.
    uid: String
    # Plant topics this expert can give advice on.
    plants: List[String]
    # uids of the other experts this expert follows.
    following: List[String]
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,339
|
Wise-Economy/plantix
|
refs/heads/master
|
/PlantixCommunityService.py
|
import os
import json
from typing import Any as Json
from plant_expert import PlantExpert
class PlantixCommunityService(object):
    """Simulates the Plantix Community API in-memory and in-process.
    """
    # Load the community graph once, at class-definition time.  The "with"
    # statement closes the file handle (the original json.load(open(...))
    # leaked it).
    with open(os.path.join(os.path.dirname(__file__), "community.json")) as _community_file:
        COMMUNITY_CACHE = json.load(_community_file)
    del _community_file  # don't keep the closed handle as a class attribute

    def get(self, uid: str) -> Json:
        """GET https://plantix.net/community/api/experts/:uid

        :param uid: Expert identifier.
        :return: PlantExpert built from the cached (plants, following) pair.
        :raises KeyError: if *uid* is not present in the community cache.
        """
        plants, following = self.COMMUNITY_CACHE[uid]
        return PlantExpert(uid, plants, following)
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,340
|
Wise-Economy/plantix
|
refs/heads/master
|
/tests/unit_test.py
|
import unittest
import json
import os
from plant_expert import PlantExpert
from tests.plantix_test_helper import PlantixApiClientForTesting
from helper import generate_plant_experts_topic_count_dict
from depth_first_search import depth_first_search_iterative
class PlantixApiClientUnitTest(unittest.TestCase):
    """Unit tests for fixture loading, fetch(), the DFS helper and the topic counter."""

    def _initiate_plantix_api_client(self):
        # Common fixture: a test client backed by ./community.json.
        # NOTE(review): _generate_network() and NETWORK are not visible on
        # PlantixApiClientForTesting in this snapshot — confirm they exist
        # on the class or a mixin.
        self.file_path = os.path.join(os.path.dirname(__file__), "community.json")
        self.plantix_api_client = PlantixApiClientForTesting(file_path=self.file_path)
        self.plantix_api_client._generate_network()

    def test_json_load(self):
        # The client's COMMUNITY_SERVICE must mirror the raw JSON fixture.
        self._initiate_plantix_api_client()
        community_json = json.load(open(self.file_path))
        assert self.plantix_api_client.COMMUNITY_SERVICE == community_json

    def test_fetch(self):
        # fetch() wraps the raw [plants, following] pair into a PlantExpert.
        self._initiate_plantix_api_client()
        community_json = json.load(open(self.file_path))
        uid = "0"
        expert = PlantExpert(
            uid=uid,
            plants=community_json[uid][0],
            following=community_json[uid][1],
        )
        assert self.plantix_api_client.fetch("0") == expert

    def test_depth_first_search_iterative(self):
        self._initiate_plantix_api_client()
        # Node "3" is not reachable
        for expert in ["0", "1", "2"]:
            reachable_nodes = depth_first_search_iterative(
                network=self.plantix_api_client.NETWORK,
                start=expert,
            )
            assert reachable_nodes == set(["0", "1", "2"])
            assert "3" not in reachable_nodes
        # All nodes are reachable from "3"
        reachable_nodes = depth_first_search_iterative(
            network=self.plantix_api_client.NETWORK,
            start="3",
        )
        assert reachable_nodes == set(["0", "1", "2", "3"])

    def test_generate_plant_topic_count_dict(self):
        # Counts how many of the given experts cover each plant topic.
        self._initiate_plantix_api_client()
        plant_topic_count_dict = generate_plant_experts_topic_count_dict(
            network=self.plantix_api_client.NETWORK,
            experts=set(["3"]),
        )
        assert plant_topic_count_dict == {"asparagus": 1, "beetroot": 1}
        plant_topic_count_dict = generate_plant_experts_topic_count_dict(
            network=self.plantix_api_client.NETWORK,
            experts=set(["2", "1"]),
        )
        assert plant_topic_count_dict == {"pear": 2, "apple": 1}
if __name__ == '__main__':
    # Allow running this test module directly with `python tests/unit_test.py`.
    unittest.main()
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,341
|
Wise-Economy/plantix
|
refs/heads/master
|
/plantix.py
|
import os
import json
from typing import Text as String, Dict
from depth_first_search import depth_first_search_iterative
from helper import generate_plant_experts_topic_count_dict
from plant_expert import PlantExpert
class PlantixApiClient(object):
    """
    SDK for our Plantix Community API.
    """
    # NOTE(review): COMMUNITY_SERVICE is only assigned by the testing
    # subclass (PlantixApiClientForTesting); calling fetch() on this base
    # class raises AttributeError — confirm where the attribute is meant
    # to come from in production use.

    def fetch(self, uid: String) -> PlantExpert:
        """
        Fetch a plant expert by uid.
        @param uid: ID of the expert to fetch
        @raise KeyError: if no such uid exists
        """
        # Each entry is a [plants, following] pair keyed by uid.
        plants, following = self.COMMUNITY_SERVICE[uid]
        return PlantExpert(uid, plants, following)

    def find_topics(self, start: String, n: int) -> tuple:
        """
        Find the 'n' most covered plant topics in the network of experts reachable for the expert
        with uid=start.
        :param start: ID of the "start" expert to calculate the n
                      most covered plants in the network of experts
                      reachable for this expert.
        :param n: Number of most covered plants in the network of experts reachable for this expert.
        :return: Tuple with 'n' most covered plants in the network.
        """
        # NOTE(review): the unit tests call both helpers with a network=
        # argument; neither call below passes one — verify the helpers have
        # a suitable default network or that an argument is missing here.
        reachable_experts = depth_first_search_iterative(
            start=start,
        )
        plant_experts_count_dict = generate_plant_experts_topic_count_dict(
            experts=reachable_experts,
        )
        # Sort topics by expert count, highest first, and keep the top n.
        top_n_plant_topics = sorted(
            plant_experts_count_dict,
            key=plant_experts_count_dict.get,
            reverse=True,
        )[:n]
        return tuple(top_n_plant_topics)
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,342
|
Wise-Economy/plantix
|
refs/heads/master
|
/tests/test_payload_generator.py
|
import json
import random
import string
def generate_payload(n: int, clique: bool):
    # NOTE(review): unfinished stub — a topics list is generated but the
    # loop body is empty, so nothing is built or returned yet.
    possible_plant_topics = [id_generator() for i in range(10)]
    for node in range(n):
        pass
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,343
|
Wise-Economy/plantix
|
refs/heads/master
|
/tests/integration_test.py
|
from plantix import PlantixApiClient
from plant_expert import PlantExpert
import os
from tests.plantix_test_helper import PlantixApiClientForTesting
import unittest
class PlantixApiClientIntegrationTest(unittest.TestCase):
    """End-to-end checks of find_topics() against the community.json fixture."""

    def _initiate_plantix_api_client(self):
        # Build a client backed by the local JSON fixture next to this file.
        self.file_path = os.path.join(os.path.dirname(__file__), "community.json")
        self.plantix_api_client = PlantixApiClientForTesting(file_path=self.file_path)

    def test_find_topics(self):
        """
        "beetroot" & "asparagus" never appear in this list
        as the expert "3" is not followed by anyone
        """
        self._initiate_plantix_api_client()
        result = self.plantix_api_client.find_topics(start="0", n=2)
        assert result == ('pear', 'apple')
        result = self.plantix_api_client.find_topics(start="1", n=2)
        assert result == ('pear', 'apple')
        result = self.plantix_api_client.find_topics(start="2", n=2)
        # Counts can tie for this start node, so only membership is asserted.
        assert set(result) == set(['pear', 'apple'])
        result = self.plantix_api_client.find_topics(start="3", n=2)
        assert result == ('pear', 'apple')
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,344
|
Wise-Economy/plantix
|
refs/heads/master
|
/tests/plantix_test_helper.py
|
from plantix import PlantixApiClient
import json
class PlantixApiClientForTesting(PlantixApiClient):
    """Test double for PlantixApiClient backed by a local JSON fixture.

    Loads the community data from *file_path* into COMMUNITY_SERVICE so the
    SDK can be exercised without the real service.
    """

    def __init__(self, file_path=None):
        if file_path is not None:
            # Context manager fixes the file-handle leak of the original
            # json.load(open(file_path)) call.
            with open(file_path) as fixture:
                self.COMMUNITY_SERVICE = json.load(fixture)
|
{"/depth_first_search.py": ["/PlantixCommunityService.py"], "/helper.py": ["/PlantixCommunityService.py"], "/PlantixCommunityService.py": ["/plant_expert.py"], "/tests/unit_test.py": ["/plant_expert.py", "/tests/plantix_test_helper.py", "/helper.py", "/depth_first_search.py"], "/plantix.py": ["/depth_first_search.py", "/helper.py", "/plant_expert.py"], "/tests/integration_test.py": ["/plantix.py", "/plant_expert.py", "/tests/plantix_test_helper.py"], "/tests/plantix_test_helper.py": ["/plantix.py"]}
|
15,345
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/fields.py
|
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
"""
Custom Django model field OrderField. It takes an optional for_fields param that scopes the
ordering. It works by overriding the pre_save method:
1. check whether a value already exists
2. build a queryset to retrieve all objects of the field's model
3. if there are any names in the for_fields attr, filter the queryset by the current values of
the model fields listed in for_fields
4. retrieve the object with the highest (latest) order; if there is none, the order attr is set to 0
5. otherwise, if such an object exists, its order + 1 is used
6. setattr() stores the calculated order on the instance
"""
class OrderField(models.PositiveIntegerField):
    """PositiveIntegerField that auto-assigns the next order value on save.

    When no explicit value is set, the field looks at existing rows
    (optionally restricted to rows sharing the same values for the fields
    named in ``for_fields``) and assigns ``highest existing order + 1``,
    or ``0`` when no such row exists.
    """

    def __init__(self, for_fields=None, *args, **kwargs):
        # Field names used to scope the ordering (e.g. order per course).
        self.for_fields = for_fields
        super().__init__(*args, **kwargs)

    def deconstruct(self):
        # Preserve the custom kwarg so makemigrations can reconstruct
        # this field correctly.
        name, path, args, kwargs = super().deconstruct()
        if self.for_fields is not None:
            kwargs['for_fields'] = self.for_fields
        return name, path, args, kwargs

    def pre_save(self, model_instance, add):
        if getattr(model_instance, self.attname) is None:
            # No current value: compute the next order within the scope.
            try:
                qs = self.model.objects.all()
                if self.for_fields:
                    # Filter by objects with the same field values
                    # for the fields in "for_fields".
                    query = {field: getattr(model_instance, field)
                             for field in self.for_fields}
                    qs = qs.filter(**query)
                # Get the order of the last item. Use attname instead of the
                # hard-coded "order" so the field works regardless of the
                # attribute name it is bound to (original read last_item.order).
                last_item = qs.latest(self.attname)
                value = getattr(last_item, self.attname) + 1
            except ObjectDoesNotExist:
                value = 0
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super().pre_save(model_instance, add)
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,346
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/chat/consumers.py
|
import json
from channels.generic.websocket import AsyncWebsocketConsumer
from django.utils import timezone
class ChatConsumer(AsyncWebsocketConsumer):
    """
    WebSocket consumer for per-course chat rooms.

    Receives JSON text frames, re-broadcasts them to the course's channel
    group with the sender's username and a timestamp, and echoes group
    messages back to each connected client via self.send().
    """

    async def connect(self):
        """
        Join the per-course chat group and accept the WebSocket.

        Retrieves the course id from the URL route, derives the group name
        'chat_<course_id>', registers this channel in the group and accepts
        the connection.
        """
        self.user = self.scope['user']
        self.id = self.scope['url_route']['kwargs']['course_id']
        self.room_group_name = 'chat_%s' % self.id
        # join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        # accept connection
        await self.accept()

    async def disconnect(self, close_code):
        # leave room group
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    # receive message from WebSocket
    async def receive(self, text_data):
        # Parse the incoming JSON frame and broadcast it to the room group
        # with the sender's username and a server-side UTC timestamp.
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        now = timezone.now()
        # send message to room group
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message,
                'user': self.user.username,
                'datetime': now.isoformat()
            }
        )

    # retrieve message from room group
    async def chat_message(self, event):
        # send message to WebSocket; the 'type' key above routes here
        await self.send(text_data=json.dumps(event))
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,347
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/forms.py
|
from django.forms import inlineformset_factory
from courses.models import Course, Module
"""
Formset to validate all forms
fields: fields included in all forms
extra: number of extra empty forms to display
can_delete: if True Django will display checkbox input to mark objects to delete
"""
# Inline formset: edits a Course's Module objects on a single page.
ModuleFormSet = inlineformset_factory(Course,
                                      Module,
                                      fields=['title', 'description'],
                                      extra=2,
                                      can_delete=True)
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,348
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/e_learning_Django3/settings.py
|
"""
Django settings for e_learning_Django3 project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
import redis
from django.urls import reverse_lazy
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
import environ
env = environ.Env(
    # set casting, default value
    DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
# False if not in os.environ
DEBUG = env('DEBUG')
# SECURITY WARNING: keep the secret key used in production secret!
# Raises django's ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = ['e-learning-django3.herokuapp.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'courses.apps.CoursesConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'students.apps.StudentsConfig',
    'embed_video',
    'memcache_status',
    'rest_framework',
    'chat',
    'channels',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # 'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.cache.FetchFromCacheMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'e_learning_Django3.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'e_learning_Django3.wsgi.application'
# ASGI entry point required by channels for WebSocket support.
ASGI_APPLICATION = 'e_learning_Django3.routing.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = 'media/'
# after login user is redirected to his/hers course list
LOGIN_REDIRECT_URL = reverse_lazy('student_course_list')
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     }
# }
# NOTE(review): the Memcached CACHES block above is commented out, so the
# cache calls in courses/views.py use Django's default local-memory cache —
# confirm that is intended in production.
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 60 * 15  # 15 minutes
CACHE_MIDDLEWARE_KEY_PREFIX = 'educa'
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}
# Channel layer backed by a local Redis instance.
# NOTE(review): hosts is hard-coded to localhost while ALLOWED_HOSTS targets
# Heroku — confirm the production channel layer configuration.
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            'hosts': [('127.0.0.1', 6379)],
        },
    },
}
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
DATABASE_URL = env('DATABASE_URL')
# NOTE(review): this creates a Redis client at import time; if REDIS_URL is
# unset, redis.from_url(None) fails — consider guarding or lazy initialization.
r = redis.from_url(os.environ.get("REDIS_URL"))
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,349
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/students/forms.py
|
from django import forms
from courses.models import Course
"""
This form is not to be displayed, it's function for "enroll" button in
course detail view.
"""
class CourseEnrollForm(forms.Form):
    """Hidden one-field form backing the "enroll" button on course detail."""
    # The course is pre-selected by the view; the user never edits it.
    course = forms.ModelChoiceField(queryset=Course.objects.all(),
                                    widget=forms.HiddenInput)
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,350
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/templatetags/course.py
|
from django import template
register = template.Library()
# model_name template custom filter to apply in templates "|model_name"
@register.filter
def model_name(obj):
    """Return the Django model name of *obj*, or None for non-model values."""
    try:
        return obj._meta.model_name
    except AttributeError:
        # Values without a _meta (non-model objects) render as None.
        return None
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,351
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/migrations/0006_auto_20201206_1337.py
|
# Generated by Django 3.1.4 on 2020-12-06 13:37
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames Course.student to Course.students (plural M2M).

    dependencies = [
        ('courses', '0005_course_student'),
    ]
    operations = [
        migrations.RenameField(
            model_name='course',
            old_name='student',
            new_name='students',
        ),
    ]
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,352
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/api/views.py
|
from django.shortcuts import get_object_or_404
from rest_framework import generics, viewsets
from rest_framework.authentication import BasicAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from courses.api.permissions import IsEnrolled
from courses.api.serializers import SubjectSerializer, CourseSerializer, CourseWithContentsSerializer
from courses.models import Subject, Course
class SubjectListView(generics.ListAPIView):
    """GET: list all subjects."""
    queryset = Subject.objects.all()
    serializer_class = SubjectSerializer
class SubjectDetailView(generics.RetrieveAPIView):
    """GET: retrieve a single subject by pk."""
    queryset = Subject.objects.all()
    serializer_class = SubjectSerializer
class CourseViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only course API with extra enroll/contents actions."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer

    @action(detail=True,
            methods=['post'],
            authentication_classes=[BasicAuthentication],
            permission_classes=[IsAuthenticated])
    def enroll(self, request, *args, **kwargs):
        """
        Custom method enables to enroll students for courses, detail=True
        specifies that action is taken on single obj (e.g.course), for method
        'post' with auth/perms specified perform enroll func: get current course
        enroll current user to it.
        """
        course = self.get_object()
        course.students.add(request.user)
        return Response({'enrolled': True})

    @action(detail=True,
            methods=['get'],
            serializer_class=CourseWithContentsSerializer,
            authentication_classes=[BasicAuthentication],
            permission_classes=[IsEnrolled, IsAuthenticated])
    def contents(self, request, *args, **kwargs):
        """
        GET single object (course) to render using CourseWithContentsSerializer
        and check auth/perms and returns course object via retrieve.
        Only enrolled, authenticated users may read the contents.
        """
        return self.retrieve(request, *args, **kwargs)
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,353
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/migrations/0004_auto_20201206_1231.py
|
# Generated by Django 3.1.4 on 2020-12-06 12:31
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds related_name='modules' to Module.course so a
    # course's modules can be reached via course.modules.

    dependencies = [
        ('courses', '0003_auto_20201205_1951'),
    ]
    operations = [
        migrations.AlterField(
            model_name='module',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='modules',
                                    to='courses.course'),
        ),
    ]
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,354
|
erobakiewicz/e_learning_Djagno3
|
refs/heads/master
|
/courses/views.py
|
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.cache import cache
from django.db.models import Count
from django.forms import modelform_factory
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse_lazy
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from django.views.generic.base import TemplateResponseMixin, View
from courses.forms import ModuleFormSet
from courses.models import Course, Module, Content, Subject
# overriding get_queryset method to get only content created by logged in user
from students.forms import CourseEnrollForm
# Mixins shared by the owner-scoped course management views.
class OwnerMixin(object):
    """Limit a view's queryset to objects owned by the logged-in user."""
    def get_queryset(self):
        qs = super().get_queryset()
        return qs.filter(owner=self.request.user)
# overriding form_valid method to set owner to current user
class OwnerEditMixin(object):
    """Stamp the current user as owner before saving a valid form."""
    def form_valid(self, form):
        form.instance.owner = self.request.user
        return super().form_valid(form)
class OwnerCourseMixin(OwnerMixin,
                       LoginRequiredMixin,
                       PermissionRequiredMixin):
    """Shared model/fields/redirect configuration for course CRUD views."""
    model = Course
    fields = ['subject', 'title', 'slug', 'overview']
    success_url = reverse_lazy('manage_course_list')
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
    """Adds the shared form template used by create and update views."""
    template_name = 'courses/manage/course/form.html'
# generic views for creating, updating and deleting courses
class ManageCourseListView(OwnerCourseMixin, ListView):
    """List the courses owned by the current user."""
    template_name = 'courses/manage/course/list.html'
    permission_required = 'courses.view_course'
class CourseCreateView(OwnerCourseEditMixin, CreateView):
    """Create a new course owned by the current user."""
    permission_required = 'courses.add_course'
class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
    """Edit one of the current user's courses."""
    permission_required = 'courses.change_course'
class CourseDeleteView(OwnerCourseMixin, DeleteView):
    """Confirm-and-delete view for one of the current user's courses."""
    template_name = 'courses/manage/course/delete.html'
    permission_required = 'courses.delete_course'
"""
View handles formset functionality to add, update and delete courses.
Inherits from TemplateResponseMixin (requires HTTP response and allows
method "render_to_response" which allows to render template and View()
basic generic view which has methods:
get_formset(): allows to use formset to build view
dispatch(): takes HTTP request and it's params and delegate rendering
to specific method here get or post using get_object_or_404 to get
course and its ID/PK and user
get(): execute GET HTTP request and builds view from formset
post(): build formset instance using submitted data, execute is_valid() method
from formset, if valid saves data by adding, updating or deleting it.
"""
class CourseModuleUpdateView(TemplateResponseMixin, View):
    """Edit all modules of one course on a single formset page."""
    template_name = 'courses/manage/module/formset.html'
    # Set by dispatch(): the course whose modules are being edited.
    course = None
    def get_formset(self, data=None):
        # Build the ModuleFormSet bound to the current course.
        return ModuleFormSet(instance=self.course,
                             data=data)
    def dispatch(self, request, pk):
        # Only the course owner may edit; otherwise 404.
        self.course = get_object_or_404(Course,
                                        id=pk,
                                        owner=request.user)
        return super().dispatch(request, pk)
    def get(self, request, *args, **kwargs):
        formset = self.get_formset()
        return self.render_to_response({'course': self.course,
                                        'formset': formset})
    def post(self, request, *args, **kwargs):
        formset = self.get_formset(data=request.POST)
        if formset.is_valid():
            formset.save()
            return redirect('manage_course_list')
        # Invalid data: re-render the formset with error messages.
        return self.render_to_response({'course': self.course,
                                        'formset': formset})
"""
View that allows to create or updated different models' contents.
get_model(): checks if given model is either text, video, image or file
it uses django apps to get valid existing model name
get_form(): builds dynamic form using modelform_factory() function
for given model
dispatch(): receives URL params and stores corresponding module, model
and content obj as class attributes (module_id, model_name, id of obj if
it is updated if not its None).
get(): builds the model form for given model (text, video,image or file)
instance with given ID or if the instance is created ID = None
post(): passes the submitted data and files, validate it, if valid
create new object/update and assign to logged in user, if no ID is
provided, new obj is created.
"""
class ContentCreateUpdateView(TemplateResponseMixin, View):
    # Set by dispatch() for the lifetime of one request.
    module = None
    model = None
    obj = None
    template_name = 'courses/manage/content/form.html'
    def get_model(self, model_name):
        # Only the four content models are allowed; anything else -> None.
        if model_name in ['text', 'video', 'image', 'file']:
            return apps.get_model(app_label='courses',
                                  model_name=model_name)
    def get_form(self, model, *args, **kwargs):
        # owner/order/created are managed server-side, never user-editable.
        Form = modelform_factory(model, exclude=['owner',
                                                 'order',
                                                 'created'])
        return Form(*args, **kwargs)
    def dispatch(self, request, module_id, model_name, id=None):
        # Only the owner of the module's course may edit its content.
        self.module = get_object_or_404(Module,
                                        id=module_id,
                                        course__owner=request.user
                                        )
        self.model = self.get_model(model_name)
        if id:
            # Updating: fetch the existing item, owner-scoped.
            self.obj = get_object_or_404(self.model,
                                         id=id,
                                         owner=request.user)
        return super().dispatch(request, module_id, model_name, id)
    def get(self, request, module_id, model_name, id=None):
        # Render an empty form (create) or one bound to self.obj (update).
        form = self.get_form(self.model, instance=self.obj)
        return self.render_to_response({'form': form,
                                        'object': self.obj})
    def post(self, request, module_id, model_name, id=None):
        form = self.get_form(self.model,
                             instance=self.obj,
                             data=request.POST,
                             files=request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.owner = request.user
            obj.save()
            if not id:
                # new content, doesn't have an ID
                Content.objects.create(module=self.module,
                                       item=obj)
            return redirect('module_content_list', self.module.id)
        return self.render_to_response({'form': form,
                                        'object': self.obj})
class ContentDeleteView(View):
    """Delete one Content row together with its concrete item (owner-only)."""

    def post(self, request, id):
        content = get_object_or_404(
            Content, id=id, module__course__owner=request.user)
        parent_module = content.module
        # Remove the concrete item first, then the Content wrapper itself.
        content.item.delete()
        content.delete()
        return redirect('module_content_list', parent_module.id)
class ModuleContentListView(TemplateResponseMixin, View):
    """Show the content items of one module (owner-only)."""
    template_name = 'courses/manage/module/content_list.html'
    def get(self, request, module_id):
        module = get_object_or_404(Module,
                                   id=module_id,
                                   course__owner=request.user)
        return self.render_to_response({'module': module})
# Views responsible for AJAX functionalities in management of courses
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """Persist drag-and-drop ordering of a course's modules (AJAX JSON)."""
    def post(self, request):
        # request_json maps module id -> new order; owner scoping means
        # ids the user doesn't own simply match no rows.
        for id, order in self.request_json.items():
            Module.objects.filter(id=id,
                                  course__owner=request.user).update(order=order)
        return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """Persist drag-and-drop ordering of a module's contents (AJAX JSON)."""
    def post(self, request):
        for id, order in self.request_json.items():
            Content.objects.filter(id=id,
                                   module__course__owner=request.user).update(order=order)
        return self.render_json_response({'saved': 'OK'})
"""
View that displays courses.
get(): using annotate/Count it aggregate all courses and modules (total number),
if subject is given the list is filtered to fit the subject.
"""
class CourseListView(TemplateResponseMixin, View):
    model = Course
    template_name = 'courses/course/list.html'
    """
    get(): provide by default all courses if you filter by subject show them (also cache them)
    """
    def get(self, request, subject=None):
        # Cached list of subjects annotated with their course counts.
        subjects = cache.get('all_subjects')
        if not subjects:
            subjects = Subject.objects.annotate(total_courses=Count('courses'))
            cache.set('all_subjects', subjects)
        all_courses = Course.objects.annotate(total_modules=Count('modules'))
        if subject:
            # Per-subject course list, cached under its own key.
            subject = get_object_or_404(Subject, slug=subject)
            key = f'subject_{subject.id}_courses'
            courses = cache.get(key)
            if not courses:
                courses = all_courses.filter(subject=subject)
                cache.set(key, courses)
        else:
            courses = cache.get('all_courses')
            if not courses:
                courses = all_courses
                cache.set('all_courses', courses)
        # NOTE(review): cache entries use the default timeout and are never
        # invalidated when courses change — stale listings are possible.
        return self.render_to_response({'subjects': subjects,
                                        'subject': subject,
                                        'courses': courses})
class CourseDetailView(DetailView):
    """Public course detail page; adds a hidden enroll form to the context."""
    model = Course
    template_name = 'courses/course/detail.html'

    def get_context_data(self, **kwargs):
        # The enroll form renders as a single button pre-bound to this course.
        ctx = super().get_context_data(**kwargs)
        ctx['enroll_form'] = CourseEnrollForm(initial={'course': self.object})
        return ctx
|
{"/courses/views.py": ["/courses/forms.py", "/students/forms.py"]}
|
15,355
|
alexalevtmp/stepik575FinalProject
|
refs/heads/master
|
/pages/login_page.py
|
from .base_page import BasePage
from .locators import LoginPageLocators
class LoginPage(BasePage):
    """Page object for the login/registration page."""

    def should_be_login_page(self):
        # Aggregate check: URL plus both forms must be present.
        self.should_be_login_url()
        self.should_be_login_form()
        self.should_be_register_form()
    def should_be_login_url(self):
        # Check that the current URL is the login page URL.
        # assert self.url.find("/login/") != -1, "Login url is not presented"
        # assert self.is_element_present(*LoginPageLocators.LOGIN_LINK), "Login link is not presented"
        assert self.browser.current_url.find("login") != -1, "Login url is not presented"
        # current_url
        # Gets the URL of the current page.
        # Usage: driver.current_url
    def should_be_login_form(self):
        # Check that the login form is present on the page.
        assert self.is_element_present(*LoginPageLocators.LOGIN_FORM), "Login form is not presented"
    def should_be_register_form(self):
        # Check that the registration form is present on the page.
        assert self.is_element_present(*LoginPageLocators.REGISTER_FORM), "Registration form is not presented"
|
{"/test_main_page.py": ["/pages/login_page.py"], "/test_product_page.py": ["/pages/product_page.py"]}
|
15,356
|
alexalevtmp/stepik575FinalProject
|
refs/heads/master
|
/test_main_page.py
|
from .pages.main_page import MainPage
from .pages.login_page import LoginPage
# link for 4.3-2
# link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209?promo=midsummer"
# link for 4.3-3
# Product page URL used by the (commented-out) earlier test steps below.
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=newYear2019"
# link = "http://selenium1py.pythonanywhere.com/"
# link_login = "http://selenium1py.pythonanywhere.com/en-gb/accounts/login/"
# def test_guest_can_go_to_login_page(browser):
# page = MainPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
# page.open() # открываем страницу
# page.go_to_login_page() # выполняем метод страницы - переходим на страницу логина
# def test_guest_should_see_login_link(browser):
# page = MainPage(browser, link)
# page.open()
# page.should_be_login_link()
# # Второй подход: переход происходит неявно, страницу инициализируем в теле теста:
# # 1. Закомментируйте строку с возвращаемым значением
def go_to_login_page(self):
    # NOTE(review): this looks like a page-object method pasted at module
    # level — it takes `self` and uses MainPageLocators, which is not
    # imported in this file. It probably belongs on MainPage; confirm.
    link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)
    link.click()
    # return LoginPage(browser=self.browser, url=self.browser.current_url)
# # 2. Инициализируем LoginPage в теле теста (не забудьте импортировать в файл нужный класс):
# # from .pages.login_page import LoginPage
def test_guest_can_go_to_login_page(browser):
    """A guest can navigate from the main page to a valid login page."""
    url = "http://selenium1py.pythonanywhere.com"
    main_page = MainPage(browser, url)
    main_page.open()
    main_page.go_to_login_page()
    login_page = LoginPage(browser, browser.current_url)
    login_page.should_be_login_page()
## from prev step!
# def test_should_be_login_page(browser):
# page = LoginPage(browser, link_login)
# page.open()
# page.should_be_login_page()
# "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209?promo=midsummer"
# "http://selenium1py.pythonanywhere.com/"
# "http://selenium1py.pythonanywhere.com/en-gb/accounts/login/"
# Wed Jul 31 20:36:11 UTC 2019
# (selenium_env) bash-3.2$ pytest -v --tb=line --language=en test_main_page.py
# ============================================================ test session starts =============================================================
# platform darwin -- Python 3.7.3, pytest-3.10.1, py-1.8.0, pluggy-0.12.0 -- /anaconda3/envs/selenium_env/bin/python
# cachedir: .pytest_cache
# rootdir: /Users/asl/stepik575/stepik575FinalProject, inifile:
# plugins: rerunfailures-3.1
# collected 3 items
# test_main_page.py::test_guest_can_go_to_login_page PASSED [ 33%]
# test_main_page.py::test_guest_should_see_login_link PASSED [ 66%]
# test_main_page.py::test_should_be_login_page PASSED [100%]
# ========================================================= 3 passed in 12.95 seconds ==========================================================
|
{"/test_main_page.py": ["/pages/login_page.py"], "/test_product_page.py": ["/pages/product_page.py"]}
|
15,357
|
alexalevtmp/stepik575FinalProject
|
refs/heads/master
|
/test_product_page.py
|
from .pages.product_page import ProductPage
from .pages.locators import ProductPageLocators # ADD_TO_CART
# locators.py
# class ProductPageLocators(object):
# ADD_TO_CART = (By.CSS_SELECTOR, "btn-add-to-basket")
# def test_guest_can_go_to_login_page(browser):
# page = MainPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
# page.open() # открываем страницу
# page.go_to_login_page() # выполняем метод страницы - переходим на страницу логина
# Product page URL exercised by the test below.
link = "http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear"


def test_guest_can_add_product_to_cart(browser):
    """Open the product page and add the product to the basket."""
    product_page = ProductPage(browser, link)
    product_page.open()
    product_page.add_to_cart()
# add_to_cart()
|
{"/test_main_page.py": ["/pages/login_page.py"], "/test_product_page.py": ["/pages/product_page.py"]}
|
15,358
|
alexalevtmp/stepik575FinalProject
|
refs/heads/master
|
/pages/product_page.py
|
from .base_page import BasePage
from .locators import ProductPageLocators
# from .locators import MainPageLocators
class ProductPage(BasePage):
    """Page object for a single product page."""

    def add_to_cart(self):
        """Click the add-to-basket button, then solve the promo quiz dialog."""
        add_button = self.browser.find_element(*ProductPageLocators.ADD_TO_CART)
        add_button.click()
        self.solve_quiz_and_get_code()

    def guest_can_add_product_to_cart(self):
        # Placeholder kept for interface compatibility; no checks implemented yet.
        pass
# class MainPage(BasePage):
# def go_to_login_page(self):
# login_link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)
# login_link.click()
# # def should_be_login_link(self):
# # assert self.is_element_present(By.CSS_SELECTOR, "#registration_link"), "Login link is not presented"
# def should_be_login_link(self):
# assert self.is_element_present(*MainPageLocators.LOGIN_LINK), "Login link is not presented"
|
{"/test_main_page.py": ["/pages/login_page.py"], "/test_product_page.py": ["/pages/product_page.py"]}
|
15,365
|
waliamehak/LearnML
|
refs/heads/main
|
/admin_classifier/apps.py
|
from django.apps import AppConfig
class AdminClassifierConfig(AppConfig):
    """Django application configuration for the admin-facing classifier app."""
    name = 'admin_classifier'
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,366
|
waliamehak/LearnML
|
refs/heads/main
|
/user_classifier/apps.py
|
from django.apps import AppConfig
class UserClassifierConfig(AppConfig):
    """Django application configuration for the learner-facing classifier app."""
    name = 'user_classifier'
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,367
|
waliamehak/LearnML
|
refs/heads/main
|
/LearnML Files/Temp Files/prac.py
|
"""Train a LinearRegression model on the 50_Startups dataset and pickle it.

Reads 50_Startups.csv from the working directory, standardizes the
features, fits a linear regression against the last column, and writes
the fitted model to regressor.pkl. (Unused numpy/matplotlib imports
removed; remaining imports hoisted to the top per PEP 8.)
"""
import pickle

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

# Load the dataset: every column but the last is a feature, the last is the target.
dataset = pd.read_csv("50_Startups.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Standardize the features before fitting.
sc_X = StandardScaler()
X = sc_X.fit_transform(X)

# Fit the regression model on the full (scaled) dataset.
regressor = LinearRegression()
regressor.fit(X, y)

# NOTE(review): the fitted scaler is not persisted alongside the model, so
# any consumer must re-create the same scaling to get meaningful predictions.
with open('regressor.pkl', 'wb') as file:
    pickle.dump(regressor, file)
print("Done")
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,368
|
waliamehak/LearnML
|
refs/heads/main
|
/admin_classifier/urls.py
|
from django.urls import path
from . import views
# URL routes for the admin-facing classifier pages.
urlpatterns = [
    path('', views.Home.as_view(), name='admin_home'),
    path('classification', views.Classification.as_view(), name='admin_classification'),
    path('regression', views.Regression.as_view(), name='admin_regression'),
    path('clustering', views.Clustering.as_view(), name='admin_clustering')
]
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,369
|
waliamehak/LearnML
|
refs/heads/main
|
/admin_classifier/views.py
|
# Django Imports
from django.shortcuts import render
from django.views import View
# Internal Imports
from . import mongodb as mdb
# Python Package Imports
import pickle
import pandas as pd
from io import StringIO
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from bson.binary import Binary
from base64 import b64encode
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.cluster import KMeans
from abc import ABC, abstractmethod
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
# Module-level state shared across views: the most recently uploaded CSV
# (mutated via `global data` in the view classes) and the MongoDB collection handle.
data = pd.DataFrame({})
db_data = mdb.access()
# Home Module Starts
class Home(View):
    """Renders the static admin home page."""
    template_name = 'admin_classifier/home.html'
    context = {}

    def get(self, request):
        return render(request, self.template_name, self.context)
# Home Module Ends
# Simple Factory Pattern Starts:
class Algorithm(View):
    """Base view for the admin algorithm pages.

    `get`/`post` are placeholders overridden by the concrete views
    (Classification / Regression / Clustering); the static helpers implement
    the shared upload/update plumbing against MongoDB via the module-level
    `db_data` handle.
    """

    def get(self, request):
        pass

    def post(self, request):
        pass

    @staticmethod
    def description_update(algo_desc, ds_desc, algo_name):
        # Persist the algorithm/dataset descriptions for `algo_name`
        # and return a human-readable status message.
        update_data = {'algo_desc': str(algo_desc), 'ds_desc': str(ds_desc)}
        update_message = mdb.update(db_data, algo_name, update_data, "Descriptions Updated",
                                    "Unexpected error while updating descriptions")
        return update_message

    @staticmethod
    def pkl_upload(upload_file, algo_name):
        # Store an uploaded pickle file for `algo_name`.
        # NOTE(review): the UploadedFile object itself is pickled here; the
        # learner views therefore unpickle twice (object, then its .read()
        # contents) when upload_method == 'pkl'.
        if upload_file.content_type == 'application/octet-stream':
            pkl_obj = pickle.dumps(upload_file)
            mongo_data = {'pkl_data': Binary(pkl_obj), 'upload_method': 'pkl'}
            pkl_message = mdb.update(db_data, algo_name, mongo_data, "File Uploaded",
                                     "Unexpected error while uploading pickle file")
        else:
            pkl_message = "Invalid File Type"
        return pkl_message

    @staticmethod
    def pkl_change(pkl_features_temp, algo_name, pkl_label_notes_temp=None):
        # Persist the feature names (and, except for MLR, the "key=value"
        # label notes) that describe an uploaded pickle model.
        pkl_features = []
        for feature in pkl_features_temp:
            pkl_features.append(feature.strip())
        if algo_name != "MLR":
            pkl_label_notes = {}
            for label in pkl_label_notes_temp:
                temp = label.split("=")
                pkl_label_notes[temp[0].strip()] = temp[1].strip()
            mongo_data = {'label_notes': pkl_label_notes, 'training_features': pkl_features}
        else:
            mongo_data = {'training_features': pkl_features}
        pkl_change_message = mdb.update(db_data, algo_name, mongo_data, "Success", "Error")
        return pkl_change_message

    @staticmethod
    def graph_upload(image_file, algo_name):
        # Store a PNG/JPEG graph image for `algo_name` as a base64 data URI.
        if image_file.content_type == 'image/png' or image_file.content_type == 'image/jpeg' or image_file.content_type == 'image/jpg':
            image_data = image_file.read()
            # str(b64encode(...)) yields "b'...'"; slice off the b'' wrapper.
            encoded_image = str(b64encode(image_data))[2:-1]
            mime = str(image_file.content_type)
            mime = mime + ';' if mime else ';'
            graph_image = "data:%sbase64,%s" % (mime, encoded_image)
            mongo_data = {'graph_image': graph_image}
            graph_message = mdb.update(db_data, algo_name, mongo_data, "Success", "Error")
        else:
            graph_message = "Invalid Image Type"
        return graph_message

    @staticmethod
    def csv_upload(upload_file):
        # Parse an uploaded CSV (< 50 kB) into the module-level `data` frame
        # and return its column names plus a status message.
        # NOTE(review): 'application/vnd.ms-excel' is a MIME type some
        # browsers send for .csv; others send 'text/csv' — confirm coverage.
        global data
        feature_list = []
        if upload_file.content_type == 'application/vnd.ms-excel' and upload_file.size < 50000:
            string_data = StringIO(upload_file.read().decode('utf-8'))
            data = pd.read_csv(string_data)
            feature_list = list(data.columns)
            csv_message = "File Uploaded"
        else:
            if upload_file.content_type != 'application/vnd.ms-excel':
                csv_message = "Invalid File Type"
            else:
                csv_message = "Maximum size of the CSV file should be less than 50kb"
        return feature_list, csv_message

    @staticmethod
    def scale(X_train, X_test):
        # Standardize train/test features; returns the pickled fitted scaler
        # (so it can be stored with the model) plus the transformed arrays.
        sc = StandardScaler()
        X_train = sc.fit_transform(X_train)
        X_test = sc.transform(X_test)
        return pickle.dumps(sc), X_train, X_test
class Classification(Algorithm):
    """Admin view for the KNN classification model.

    GET renders the page with the stored descriptions; POST dispatches on
    which form was submitted: description update, pickle upload, pickle
    metadata change, CSV upload, or model training ('submit').

    NOTE(review): these class-level attributes persist across requests
    served by this view class — confirm this sharing is intended.
    """
    template_name = 'admin_classifier/classification.html'
    context = {}
    feature_list = []
    submit_button = None
    message = None
    pkl_message = None
    pkl_change_message = None
    csv_message = None
    update_message = None
    accuracy = None
    f1_score = None
    algo_desc = None
    ds_desc = None

    def get(self, request):
        # Load the stored algorithm/dataset descriptions for KNN.
        descriptions = mdb.find(db_data, "KNN")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        self.context = {'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc}
        return render(request, self.template_name, self.context)

    def post(self, request):
        global data
        descriptions = mdb.find(db_data, "KNN")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        if 'update' in request.POST:
            # Description edit: persist and notify subscribed learners.
            self.algo_desc = request.POST.get('algo_desc')
            self.ds_desc = request.POST.get('ds_desc')
            self.update_message = Algorithm.description_update(self.algo_desc, self.ds_desc, 'KNN')
            op_data = mdb.find(db_data, 'OP')
            subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
            subject_obj.notify("Classification Description")
            self.context = {'update_message': self.update_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl' in request.FILES:
            # Pre-trained pickle upload.
            upload_file = request.FILES['pkl']
            self.pkl_message = Algorithm.pkl_upload(upload_file, 'KNN')
            self.context = {'pkl_message': self.pkl_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl_change' in request.POST:
            # Metadata for an uploaded pickle: features, label notes, graph image.
            pkl_features_temp = str(request.POST.get('pkl_features')).split(',')
            pkl_label_notes_temp = str(request.POST.get("pkl_label_notes")).split("\r\n")
            image_file = request.FILES['graph_image']
            pkl_change_message_temp = Algorithm.pkl_change(pkl_features_temp, "KNN", pkl_label_notes_temp)
            graph_message = Algorithm.graph_upload(image_file, "KNN")
            if pkl_change_message_temp == "Success" and graph_message == "Success":
                op_data = mdb.find(db_data, 'OP')
                subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                subject_obj.notify("Classification Pickle File")
                self.pkl_change_message = "Changes Saved Successfully"
            else:
                if graph_message != "Success":
                    self.pkl_change_message = "Invalid Image Type"
                else:
                    self.pkl_change_message = "Unexpected error while saving pickle changes"
            self.context = {'pkl_change_message': self.pkl_change_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'csv' in request.FILES:
            # Training CSV upload: parsed into the module-level `data` frame.
            upload_file = request.FILES['csv']
            self.feature_list, self.csv_message = Algorithm.csv_upload(upload_file)
            self.context = {'csv_message': self.csv_message, 'features': self.feature_list, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'submit' in request.POST:
            # Train a KNN model on the previously uploaded CSV.
            self.submit_button = request.POST.get("submit")
            image_file = request.FILES['csv_image']
            graph_message = Algorithm.graph_upload(image_file, "KNN")
            if graph_message == "Success":
                n_neighbors = int(request.POST.get("neighbors"))
                leaf_size = int(request.POST.get("leaf"))
                weights = str(request.POST.get("weights"))
                algorithm = str(request.POST.get("algorithm"))
                training_features = list(request.POST.getlist("training_features"))
                training_label = str(request.POST.get("training_label"))
                csv_label_notes_temp = str(request.POST.get("csv_label_notes")).split("\r\n")
                try:
                    # Label notes arrive as "key=value" lines.
                    csv_label_notes = {}
                    for label in csv_label_notes_temp:
                        temp = label.split("=")
                        csv_label_notes[temp[0].strip()] = temp[1].strip()
                    X = data.loc[:, training_features].values
                    y = data.loc[:, training_label].values
                    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
                    classifier = KNeighborsClassifier(n_neighbors=n_neighbors,
                                                      leaf_size=leaf_size,
                                                      weights=weights,
                                                      algorithm=algorithm)
                    sc, X_train, X_test = Algorithm.scale(X_train, X_test)
                    classifier.fit(X_train, y_train)
                    y_pred = classifier.predict(X_test)
                    self.accuracy = round(accuracy_score(y_test, y_pred) * 100, 2)
                    self.f1_score = round(f1_score(y_test, y_pred) * 100, 2)
                    # Persist the model only if it clears the quality bar.
                    if self.accuracy >= 90 and self.f1_score >= 80:
                        pkl_obj = pickle.dumps(classifier)
                        mongo_data = {'pkl_data': Binary(pkl_obj), 'training_features': training_features,
                                      'label_notes': csv_label_notes, 'upload_method': 'csv',
                                      'testing_accuracy': self.accuracy, 'f1_score': self.f1_score,
                                      'scaling_obj': sc}
                        self.message = mdb.update(db_data, "KNN", mongo_data, "Model Successfully Trained",
                                                  "Unexpected error while training the model")
                        op_data = mdb.find(db_data, 'OP')
                        subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                        subject_obj.notify("Classification CSV File")
                    else:
                        self.message = "Model Training Failed. The accuracy of the model should be > 90% and " \
                                       "the F1-Score should be > 80%. However, this model's accuracy " \
                                       "is " + str(self.accuracy) + "% and the F1-Score " \
                                       "is " + str(self.f1_score) + "%."
                except:
                    # NOTE(review): bare except hides the real training error;
                    # consider `except Exception` plus logging.
                    self.message = "Unexpected error while training the model"
            else:
                self.message = "Invalid Image Type"
            self.context = {'submitbutton': self.submit_button, 'pkl_message': self.pkl_message,
                            'csv_message': self.csv_message, 'accuracy': self.accuracy, 'message': self.message,
                            'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc, 'f1_score': self.f1_score}
        return render(request, self.template_name, self.context)
class Regression(Algorithm):
    """Admin view for the multiple-linear-regression (MLR) model.

    Same POST dispatch shape as Classification: description update, pickle
    upload, pickle metadata change, CSV upload, or model training ('submit').
    The trained model is accepted only if its RMSE is at most 10% of the
    mean of the test labels.
    """
    template_name = 'admin_classifier/regression.html'
    context = {}
    feature_list = []
    submit_button = None
    message = None
    pkl_message = None
    pkl_change_message = None
    csv_message = None
    update_message = None
    rmse = None
    algo_desc = None
    ds_desc = None

    def get(self, request):
        # Load the stored algorithm/dataset descriptions for MLR.
        descriptions = mdb.find(db_data, "MLR")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        self.context = {'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc}
        return render(request, self.template_name, self.context)

    def post(self, request):
        global data
        descriptions = mdb.find(db_data, "MLR")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        if 'update' in request.POST:
            # Description edit: persist and notify subscribed learners.
            self.algo_desc = request.POST.get('algo_desc')
            self.ds_desc = request.POST.get('ds_desc')
            self.update_message = Algorithm.description_update(self.algo_desc, self.ds_desc, 'MLR')
            op_data = mdb.find(db_data, 'OP')
            subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
            subject_obj.notify("Regression Description")
            self.context = {'update_message': self.update_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl' in request.FILES:
            # Pre-trained pickle upload.
            upload_file = request.FILES['pkl']
            self.pkl_message = Algorithm.pkl_upload(upload_file, 'MLR')
            self.context = {'pkl_message': self.pkl_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl_change' in request.POST:
            # Metadata for an uploaded pickle (no label notes for regression).
            pkl_features_temp = str(request.POST.get('pkl_features')).split(',')
            image_file = request.FILES['graph_image']
            pkl_change_message_temp = Algorithm.pkl_change(pkl_features_temp, "MLR")
            graph_message = Algorithm.graph_upload(image_file, "MLR")
            if pkl_change_message_temp == "Success" and graph_message == "Success":
                op_data = mdb.find(db_data, 'OP')
                subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                subject_obj.notify("Regression Pickle File")
                self.pkl_change_message = "Changes Saved Successfully"
            else:
                if graph_message != "Success":
                    self.pkl_change_message = "Invalid Image Type"
                else:
                    self.pkl_change_message = "Unexpected error while saving pickle changes"
            self.context = {'pkl_change_message': self.pkl_change_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'csv' in request.FILES:
            # Training CSV upload: parsed into the module-level `data` frame.
            upload_file = request.FILES['csv']
            self.feature_list, self.csv_message = Algorithm.csv_upload(upload_file)
            self.context = {'csv_message': self.csv_message, 'features': self.feature_list, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'submit' in request.POST:
            # Train a linear-regression model on the previously uploaded CSV.
            self.submit_button = request.POST.get("submit")
            image_file = request.FILES['csv_image']
            graph_message = Algorithm.graph_upload(image_file, "MLR")
            if graph_message == "Success":
                fit_intercept = str(request.POST.get("fit_intercept"))
                fit_intercept = True if fit_intercept == "True" else False
                normalize = str(request.POST.get("normalize"))
                normalize = True if normalize == "True" else False
                training_features = list(request.POST.getlist("training_features"))
                training_label = str(request.POST.get("training_label"))
                try:
                    X = data.loc[:, training_features].values
                    y = data.loc[:, training_label].values
                    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
                    sc, X_train, X_test = Algorithm.scale(X_train, X_test)
                    regressor = LinearRegression(fit_intercept=fit_intercept,
                                                 normalize=normalize)
                    regressor.fit(X_train, y_train)
                    y_pred = regressor.predict(X_test)
                    self.rmse = round((mean_squared_error(y_test, y_pred))**0.5, 2)
                    print(self.rmse, y_test.mean()*0.1)
                    # Persist only if RMSE <= 10% of the mean test label.
                    if self.rmse <= (y_test.mean()*0.1):
                        pkl_obj = pickle.dumps(regressor)
                        mongo_data = {'pkl_data': Binary(pkl_obj), 'training_features': training_features,
                                      'rmse': self.rmse, 'scaling_obj': sc, 'upload_method': 'csv'}
                        self.message = mdb.update(db_data, "MLR", mongo_data, "Model Successfully Trained",
                                                  "Unexpected error while training the model")
                        op_data = mdb.find(db_data, 'OP')
                        subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                        subject_obj.notify("Regression CSV File")
                    else:
                        self.message = "Model Training Failed. The RMSE values of the model should be " \
                                       "less than 10% of the actual values. For this model, 10% of the actual value " \
                                       "is " + str(round(y_test.mean()*0.1, 2)) + " and the RMSE value " \
                                       "is " + str(self.rmse)
                except:
                    # NOTE(review): bare except hides the real training error.
                    self.message = "Unexpected error while training the model"
            else:
                self.message = "Invalid Image Type"
            self.context = {'submitbutton': self.submit_button, 'pkl_message': self.pkl_message,
                            'csv_message': self.csv_message, 'rmse': self.rmse, 'message': self.message,
                            'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc}
        return render(request, self.template_name, self.context)
class Clustering(Algorithm):
    """Admin view for the K-Means (KM) clustering model.

    Same POST dispatch shape as Classification; clustering has no accuracy
    gate — a successfully fitted model is persisted directly. Label notes
    map a sample point ("v1,v2,...=text") to its predicted cluster id.
    """
    template_name = 'admin_classifier/clustering.html'
    context = {}
    feature_list = []
    submit_button = None
    message = None
    pkl_message = None
    pkl_change_message = None
    csv_message = None
    update_message = None
    algo_desc = None
    ds_desc = None

    def get(self, request):
        # Load the stored algorithm/dataset descriptions for KM.
        descriptions = mdb.find(db_data, "KM")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        self.context = {'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc}
        return render(request, self.template_name, self.context)

    def post(self, request):
        global data
        descriptions = mdb.find(db_data, "KM")
        self.algo_desc = descriptions['algo_desc']
        self.ds_desc = descriptions['ds_desc']
        if 'update' in request.POST:
            # Description edit: persist and notify subscribed learners.
            self.algo_desc = request.POST.get('algo_desc')
            self.ds_desc = request.POST.get('ds_desc')
            self.update_message = Algorithm.description_update(self.algo_desc, self.ds_desc, 'KM')
            op_data = mdb.find(db_data, 'OP')
            subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
            subject_obj.notify("Clustering Description")
            self.context = {'update_message': self.update_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl' in request.FILES:
            # Pre-trained pickle upload.
            upload_file = request.FILES['pkl']
            self.pkl_message = Algorithm.pkl_upload(upload_file, 'KM')
            self.context = {'pkl_message': self.pkl_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'pkl_change' in request.POST:
            # Metadata for an uploaded pickle: features, label notes, graph image.
            pkl_features_temp = str(request.POST.get('pkl_features')).split(',')
            pkl_label_notes_temp = str(request.POST.get("pkl_label_notes")).split("\r\n")
            image_file = request.FILES['graph_image']
            pkl_change_message_temp = Algorithm.pkl_change(pkl_features_temp, "KM", pkl_label_notes_temp)
            graph_message = Algorithm.graph_upload(image_file, "KM")
            if pkl_change_message_temp == "Success" and graph_message == "Success":
                op_data = mdb.find(db_data, 'OP')
                subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                subject_obj.notify("Clustering Pickle File")
                self.pkl_change_message = "Changes Saved Successfully"
            else:
                if graph_message != "Success":
                    self.pkl_change_message = "Invalid Image Type"
                else:
                    self.pkl_change_message = "Unexpected error while saving pickle changes"
            self.context = {'pkl_change_message': self.pkl_change_message, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'csv' in request.FILES:
            # Training CSV upload: parsed into the module-level `data` frame.
            upload_file = request.FILES['csv']
            self.feature_list, self.csv_message = Algorithm.csv_upload(upload_file)
            self.context = {'csv_message': self.csv_message, 'features': self.feature_list, 'algo_desc': self.algo_desc,
                            'ds_desc': self.ds_desc}
        elif 'submit' in request.POST:
            # Fit a K-Means model on the previously uploaded CSV.
            self.submit_button = request.POST.get("submit")
            image_file = request.FILES['csv_image']
            graph_message = Algorithm.graph_upload(image_file, "KM")
            if graph_message == "Success":
                n_clusters = int(request.POST.get("n_clusters"))
                init = str(request.POST.get("init"))
                n_init = int(request.POST.get("n_init"))
                max_iter = int(request.POST.get("max_iter"))
                training_features = list(request.POST.getlist("training_features"))
                X = data.loc[:, training_features].values
                kmeans = KMeans(n_clusters=n_clusters, init=init, n_init=n_init, max_iter=max_iter)
                try:
                    kmeans.fit(X)
                    # Each note line is "v1,v2,...=text": the integer sample
                    # point is predicted to find which cluster the text names.
                    csv_label_notes_temp = str(request.POST.get("csv_label_notes")).split("\r\n")
                    csv_label_notes = {}
                    for label in csv_label_notes_temp:
                        temp = label.split("=")
                        values = (temp[0].strip()).split(",")
                        values = list(map(int, values))
                        key = kmeans.predict([values])[0]
                        csv_label_notes[str(key)] = temp[1].strip()
                    pkl_obj = pickle.dumps(kmeans)
                    mongo_data = {'pkl_data': Binary(pkl_obj), 'training_features': training_features,
                                  'label_notes': csv_label_notes, 'upload_method': 'csv'}
                    self.message = mdb.update(db_data, "KM", mongo_data, "Model Successfully Trained",
                                              "Unexpected error while training the model")
                    op_data = mdb.find(db_data, 'OP')
                    subject_obj = ConcreteSubject(op_data['observer_list'], op_data['update_message_list'])
                    subject_obj.notify("Clustering CSV File")
                except:
                    # NOTE(review): bare except hides the real training error.
                    self.message = "Unexpected error while training the model"
            else:
                self.message = "Invalid Image Type"
            self.context = {'submitbutton': self.submit_button, 'pkl_message': self.pkl_message,
                            'csv_message': self.csv_message, 'message': self.message,
                            'algo_desc': self.algo_desc, 'ds_desc': self.ds_desc}
        return render(request, self.template_name, self.context)
# Simple Factory Pattern Ends
# Observer Pattern Starts
class Subject(ABC):
    """Observer-pattern subject interface: subscription management and notification."""

    @abstractmethod
    def subscribe(self, learner):
        pass

    @abstractmethod
    def unsubscribe(self, learner):
        pass

    @abstractmethod
    def notify(self, update):
        pass
class ConcreteSubject(Subject):
    """Observer-pattern subject backed by the MongoDB 'OP' document.

    Holds the subscriber list and the (at most five) most recent update
    messages, persisting both whenever they change.
    """

    def __init__(self, learners, messages):
        self._learners = learners
        self._messages = messages

    def subscribe(self, learner):
        """Add *learner* to the observer list and persist it."""
        self._learners.append(learner)
        mdb.update(db_data, "OP", {'observer_list': self._learners}, "Success", "Error")

    def unsubscribe(self, learner):
        """Remove *learner* from the observer list and persist it."""
        self._learners.remove(learner)
        mdb.update(db_data, "OP", {'observer_list': self._learners}, "Success", "Error")

    def notify(self, update):
        """Prepend a new update message, cap the list at five, and persist it."""
        self._messages.insert(0, "Admin Has Updated The " + str(update))
        if len(self._messages) > 5:
            self._messages.pop()
        mdb.update(db_data, "OP", {'update_message_list': self._messages}, "Success", "Error")
class Learner(ABC):
    """Observer-pattern observer interface."""

    @abstractmethod
    def update(self):
        pass
class ConcreteLearner(Learner):
    """Observer that pulls the current 'OP' document (observers + messages) from MongoDB."""

    def update(self):
        mongo_data = mdb.find(db_data, "OP")
        return mongo_data
# Observer Pattern Ends
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,370
|
waliamehak/LearnML
|
refs/heads/main
|
/user_classifier/views.py
|
# Django Imports
from django.shortcuts import render, redirect
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.contrib.auth.models import Group
from django.views import View
# Internal Imports
from . import mongodb as mdb
from .forms import CreateUserForm
from admin_classifier.views import ConcreteSubject, ConcreteLearner
# Python Package Imports
import pickle
import numpy as np
# Create your views here.
# MongoDB collection handle shared by all learner views.
db_data = mdb.access()
# Home and Login Module Starts
class Home(View):
    """Learner home page: shows admin update notifications and manages subscriptions."""
    template_name = 'user_classifier/home.html'
    context = {}

    def get(self, request):
        username = str(request.user)
        learner = ConcreteLearner()
        mongo_data = learner.update()
        len_update_messages_list = len(mongo_data['update_message_list'])
        # The template receives the messages joined into one newline-separated string.
        temp = "\n".join(mongo_data['update_message_list'])
        update_messages_list = [temp]
        observer_list = mongo_data['observer_list']
        self.context = {'username': str(username),
                        'observer_list': observer_list,
                        'update_messages_list': update_messages_list,
                        'len_update_messages_list': len_update_messages_list}
        return render(request, self.template_name, self.context)

    def post(self, request):
        # Handle subscribe/unsubscribe button presses, then re-render as in GET.
        username = str(request.user)
        learner = ConcreteLearner()
        mongo_data = learner.update()
        subject = ConcreteSubject(mongo_data["observer_list"], mongo_data["update_message_list"])
        if request.POST.get('subscribe') == "Subscribe Updates":
            # NOTE(review): ConcreteSubject.subscribe/unsubscribe have no
            # return statement, so `alert_message` is always None here.
            alert_message = subject.subscribe(username)
        if request.POST.get('unsubscribe') == "Unsubscribe Updates":
            alert_message = subject.unsubscribe(username)
        # Re-read after the change so the rendered state reflects it.
        mongo_data = learner.update()
        len_update_messages_list = len(mongo_data['update_message_list'])
        temp = "\n".join(mongo_data['update_message_list'])
        update_messages_list = [temp]
        observer_list = mongo_data['observer_list']
        self.context = {'username': username,
                        'observer_list': observer_list,
                        'update_messages_list': update_messages_list,
                        'len_update_messages_list': len_update_messages_list}
        return render(request, self.template_name, self.context)
# Simple Factory Pattern Starts
class Algorithm(View):
    """Base view for the learner algorithm pages; concrete views override get/post."""

    def get(self, request):
        pass

    def post(self, request):
        pass
class Classification(Algorithm):
    """Learner view: run a prediction with the stored KNN model."""
    template_name = 'user_classifier/classification.html'
    message = ""
    submit_button = None

    def get(self, request):
        data = mdb.find(db_data, "KNN")
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features']}
        return render(request, self.template_name, context)

    def post(self, request):
        data = mdb.find(db_data, "KNN")
        graph_image = data['graph_image']
        if data['upload_method'] == 'pkl':
            # Admin uploads pickle the UploadedFile object itself, so unpickle
            # the object, read its bytes, then unpickle the model from those.
            classifier = pickle.loads(pickle.loads(data['pkl_data']).read())
        else:
            # CSV-trained models are stored as the pickled estimator directly.
            classifier = pickle.loads(data['pkl_data'])
        if 'submit' in request.POST:
            self.submit_button = request.POST.get("submit")
            output_message = data['label_notes']
            user_inputs = [np.array(request.POST.getlist('user_inputs')).astype(np.float64)]
            # Apply the same standardization that was used at training time.
            sc = pickle.loads(data['scaling_obj'])
            try:
                user_inputs = sc.transform(user_inputs)
                preds = classifier.predict(user_inputs)
                self.message = output_message[str(preds[0])]
                if data['upload_method'] == 'csv':
                    accuracy = data['testing_accuracy']
                    f1_score = data['f1_score']
                    extra = " (" + str(accuracy) + "% accuracy and " + str(f1_score) + "% F1-Score)"
                    self.message = self.message + extra
            except:
                # NOTE(review): bare except hides the real prediction error.
                self.message = "Unexpected error while predicting the output"
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features'], 'graph_image': graph_image,
                   'message': self.message, 'submitbutton': self.submit_button}
        return render(request, self.template_name, context)
class Regression(Algorithm):
    """Learner view: run a prediction with the stored linear-regression model."""
    template_name = 'user_classifier/regression.html'
    message = ""
    submit_button = None

    def get(self, request):
        data = mdb.find(db_data, "MLR")
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features']}
        return render(request, self.template_name, context)

    def post(self, request):
        data = mdb.find(db_data, "MLR")
        graph_image = data['graph_image']
        if data['upload_method'] == 'pkl':
            # Double unpickle: stored value is a pickled UploadedFile object.
            regressor = pickle.loads(pickle.loads(data['pkl_data']).read())
        else:
            regressor = pickle.loads(data['pkl_data'])
        if 'submit' in request.POST:
            self.submit_button = request.POST.get("submit")
            user_inputs = [np.array(request.POST.getlist('user_inputs')).astype(np.float64)]
            # Apply the same standardization that was used at training time.
            sc = pickle.loads(data['scaling_obj'])
            try:
                user_inputs = sc.transform(user_inputs)
                preds = regressor.predict(user_inputs)
                self.message = "The predicted profit of the startup is " + str(round(preds[0], 2))
                if data['upload_method'] == 'csv':
                    rmse = data['rmse']
                    extra = " (With " + str(rmse) + " Root Mean-Squared Error)"
                    self.message = self.message + extra
            except:
                # NOTE(review): bare except hides the real prediction error.
                self.message = "Unexpected error while predicting the output"
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features'], 'graph_image': graph_image,
                   'message': self.message, 'submitbutton': self.submit_button}
        return render(request, self.template_name, context)
class Clustering(Algorithm):
    """Learner view: predict a cluster with the stored K-Means model (no scaling)."""
    template_name = 'user_classifier/clustering.html'
    message = ""
    submit_button = None

    def get(self, request):
        data = mdb.find(db_data, "KM")
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features']}
        return render(request, self.template_name, context)

    def post(self, request):
        data = mdb.find(db_data, "KM")
        graph_image = data['graph_image']
        if data['upload_method'] == 'pkl':
            # Double unpickle: stored value is a pickled UploadedFile object.
            classifier = pickle.loads(pickle.loads(data['pkl_data']).read())
        else:
            classifier = pickle.loads(data['pkl_data'])
        if 'submit' in request.POST:
            self.submit_button = request.POST.get("submit")
            output_message = data['label_notes']
            user_inputs = np.array(request.POST.getlist('user_inputs')).astype(np.float64)
            try:
                preds = classifier.predict([user_inputs])
                self.message = output_message[str(preds[0])]
            except:
                # NOTE(review): bare except hides the real prediction error.
                self.message = "Unexpected error while predicting the output"
        context = {'algo_desc': data['algo_desc'], 'ds_desc': data['ds_desc'],
                   'training_features': data['training_features'], 'graph_image': graph_image,
                   'message': self.message, 'submitbutton': self.submit_button}
        return render(request, self.template_name, context)
# Simple Factory Pattern Ends
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,371
|
waliamehak/LearnML
|
refs/heads/main
|
/user_classifier/urls.py
|
from django.urls import path
from . import views
# URL routes for the learner-facing classifier pages.
urlpatterns = [
    path('', views.Home.as_view(), name='learner_home'),
    path('classification', views.Classification.as_view(), name='learner_classification'),
    path('regression', views.Regression.as_view(), name='learner_regression'),
    path('clustering', views.Clustering.as_view(), name='learner_clustering')
]
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,372
|
waliamehak/LearnML
|
refs/heads/main
|
/admin_classifier/forms.py
|
from django import forms
# Dropdown option sets for the KNN hyper-parameter form below.
weights_choices = [('uniform', 'uniform'),
                   ('distance', 'distance')]
algorithm_choices = [('auto', 'auto'),
                     ('ball_tree', 'ball_tree'),
                     ('kd_tree', 'kd_tree'),
                     ('brute', 'brute')]
class InputForm(forms.Form):
    """Admin form for the KNN training hyper-parameters.

    ChoiceField is used for the fixed option sets so submitted values are
    validated against the declared choices (the previous CharField +
    Select widget rendered identically but accepted arbitrary text).
    """
    n_neighbors = forms.IntegerField(help_text="Enter Number of Neighbors")
    leaf_size = forms.IntegerField(help_text="Enter Leaf Size")
    weights = forms.ChoiceField(label='Select Weights Type', choices=weights_choices)
    algorithm = forms.ChoiceField(label='Select Algorithm Type', choices=algorithm_choices)
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,373
|
waliamehak/LearnML
|
refs/heads/main
|
/LearnML Files/Temp Files/p2.py
|
from pymongo import MongoClient

# NOTE(review): credentials are hard-coded in the connection string; move
# them to environment variables / a secrets store before publishing.
client = MongoClient("mongodb+srv://user_1:USER_1@cluster0.0oqke.mongodb.net/<dbname>?retryWrites=true&w=majority")
db = client.get_database('learnml_db')
db_data = db.algorithms
# One-off maintenance: reset the observer-pattern message list to a single welcome entry.
mongo_data = {"update_message_list": ["Welcome To Learn ML"]}
db_data.update_one({'name': 'OP'}, {'$set': mongo_data})
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,374
|
waliamehak/LearnML
|
refs/heads/main
|
/admin_classifier/mongodb.py
|
from pymongo import MongoClient
def access():
    """Return a handle to the `algorithms` collection of the `learnml_db` database.

    NOTE(review): credentials are hard-coded in the connection string; move
    them to configuration / environment variables before publishing.
    """
    client = MongoClient("mongodb+srv://user_1:USER_1@cluster0.0oqke.mongodb.net/<dbname>?retryWrites=true&w=majority")
    db = client.get_database('learnml_db')
    db_data = db.algorithms
    return db_data
def update(db_data, algo_name, update_data, success_message, error_message):
    """Apply a `$set` update to the document whose `name` equals *algo_name*.

    Returns *success_message* if the update call completes and
    *error_message* if it raises; never propagates the exception.
    """
    try:
        db_data.update_one({'name': algo_name}, {'$set': update_data})
        return success_message
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the best-effort message contract is unchanged.
        return error_message
def find(db_data, algo_name):
    """Fetch the single document whose `name` field equals *algo_name* (or None)."""
    query = {'name': algo_name}
    return db_data.find_one(query)
|
{"/user_classifier/views.py": ["/admin_classifier/views.py"]}
|
15,375
|
gormlabenz/chat-textblob
|
refs/heads/master
|
/app/events.py
|
from app import *
@sio.event
def connect(sid, environ):
    """Log every new Socket.IO connection."""
    print("connect ", sid)


@sio.event
def clientToServer(sid, data):
    """Feed an incoming chat message to the bot and send the reply back.

    The reply is addressed to the requesting client only (`to=sid`); the
    previous code broadcast it to every connected client.
    """
    print("message ", data['text'])
    response = chatbot.chat(data['text'])
    sio.emit("serverToClient", response, to=sid)
    print("response ", response)


@sio.event
def disconnect(sid):
    """Log disconnections."""
    print("disconnect ", sid)
|
{"/app/events.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py"], "/app/__init__.py": ["/app/chatbot.py"]}
|
15,376
|
gormlabenz/chat-textblob
|
refs/heads/master
|
/run.py
|
from app import app
import eventlet
if __name__ == "__main__":
    # Serve the Socket.IO WSGI app on all interfaces, port 5000.
    eventlet.wsgi.server(eventlet.listen(("", 5000)), app)
|
{"/app/events.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py"], "/app/__init__.py": ["/app/chatbot.py"]}
|
15,377
|
gormlabenz/chat-textblob
|
refs/heads/master
|
/app/__init__.py
|
import socketio
from app.chatbot import Chatbot
# Socket.IO server; CORS restricted to the local dev frontend.
sio = socketio.Server(cors_allowed_origins="http://localhost:8080")
app = socketio.WSGIApp(sio)
# Chatbot is trained from the intents file at import time.
chatbot = Chatbot('app/intents.json')
# Imported last on purpose: app.events needs sio/chatbot from this module.
from app import events
|
{"/app/events.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py"], "/app/__init__.py": ["/app/chatbot.py"]}
|
15,378
|
gormlabenz/chat-textblob
|
refs/heads/master
|
/app/chatbot.py
|
from textblob.classifiers import NaiveBayesClassifier
import json
import random
from pathlib import Path
import pprint
class Chatbot:
    def __init__(self, intents):
        """
        Intent-based chatbot backed by a TextBlob NaiveBayesClassifier.

        Insert a json file in a specific format. The chatbot will be trained
        to find a pattern in a given text string and return a response.
        Define the patterns the bot should find, then set the answers it
        should give, and give them both a suitable label.

        The format of the json file:
        {
            "intents": [
                {
                    "label": "greeting",
                    "patterns": ["Hi", "How are you"],
                    "responses": ["Hello!", "Good to see you again!"]
                }
            ]
        }
        :param intents: Path of the intents json file, relative to the
            current working directory.
        """
        file_path = Path.cwd() / intents
        with open(file_path, "r") as file:
            self.data = json.load(file)
        self.classifier = self.train()
    def train(self):
        """
        Build and return a NaiveBayesClassifier trained on every
        (pattern, label) pair found in the intents data.
        """
        train = []
        for intent in self.data["intents"]:
            for pattern in intent["patterns"]:
                train.append((pattern, intent["label"]))
        pprint.pprint(train)
        cl = NaiveBayesClassifier(train)
        # NOTE: accuracy is measured on the training set itself, so this is
        # only a sanity check, not a validation score.
        print('Accuracy: ', cl.accuracy(train))  # fixed misspelling 'Accuracity'
        cl.show_informative_features(5)
        return cl
    def chat(self, input_text):
        """
        Classify *input_text* and return a random response for the
        predicted label.
        :param input_text: A string that the response depends on.
        """
        label = self.classifier.classify(input_text)
        return self.get_response(label)
    def get_response(self, label):
        """
        Return a random response belonging to *label*.
        :param label: A label defined in the intents data.
        :raises IndexError: if the label does not exist in the data.
        """
        intents = self.data["intents"]
        responses = [
            intent["responses"] for intent in intents if intent["label"] == label
        ]
        return random.choice(responses[0])
|
{"/app/events.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py"], "/app/__init__.py": ["/app/chatbot.py"]}
|
15,396
|
cu-swe4s-fall-2019/version-control-jfgugel
|
refs/heads/master
|
/math_lib.py
|
def div(a, b):
    """Divide a by b; prints a warning and returns 1 when b is zero."""
    if b != 0:
        return a / b
    print("Can't divide by 0")
    return 1
def add(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
|
{"/calculate.py": ["/math_lib.py"]}
|
15,397
|
cu-swe4s-fall-2019/version-control-jfgugel
|
refs/heads/master
|
/calculate.py
|
import math_lib as mathfunctions
import argparse
if __name__ == '__main__':
    # CLI: add the two integer arguments, then divide the sum by 3.
    parser = argparse.ArgumentParser(description='take input from terminal')
    parser.add_argument('first_value', type=int, help='first integer to be added')
    parser.add_argument('second_value', type=int, help='second integer to be added')
    args = parser.parse_args()
    a= args.first_value
    b= args.second_value
    answer1 = mathfunctions.add(a,b)
    # div() prints a warning and returns 1 when the divisor is 0 (3 here, so never)
    answer2 = mathfunctions.div(answer1,3)
    print(answer1)
    print(answer2)
|
{"/calculate.py": ["/math_lib.py"]}
|
15,413
|
pklaus/urqmd-analysis
|
refs/heads/master
|
/plot_urqmd_pandas.py
|
#!/usr/bin/env python
""" UrQMD File Reader """
import argparse
import logging
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
def main():
parser = argparse.ArgumentParser(description='Read a config file.')
parser.add_argument('hdf5_file', metavar='HDF5_FILE', help="The HDF5 file containing the UrQMD events")
parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])
parser.add_argument('--event-no', type=int, help='Total number of events (to scale histograms)')
args = parser.parse_args()
logging.basicConfig(level=args.verbosity)
hdf = pd.HDFStore(args.hdf5_file)
df = hdf['particles']
try:
event_no = len(df['event_id'].unique())
except:
if args.event_no: event_no = args.event_no
else: parser.error('The event_id is not included in the data. You must thus specify --event-no as param.')
df['y'] = .5 * np.log((df.p0 + df.pz)/(df.p0 - df.pz))
df['mT'] = np.sqrt(df.m**2 + df.px**2 + df.py**2)
df['mT_weights'] = 1./df.mT**2
nucleons = df[df.ityp == 1]
pions = df[df.ityp == 101]
kaons = df[abs(df.ityp) == 106]
logging.info("{} particles of which {} pions or kaons".format(len(df), len(pions), len(pions)+len(kaons)))
fig, ax = plt.subplots(1,2, figsize=(10,4))
### rapidity distribution
ax[0].set_title('Rapidity Distribution')
#fig.text(0.35, 0.04, 'rapidity', ha='center', va='center')
ax[0].set_xlabel('rapidity y / GeV')
#fig.text(0.10, 0.5, 'dN/dy', ha='center', va='center', rotation='vertical')
ax[0].set_ylabel('dN/dy')
bins_rapidity = np.linspace(-4.0, 4.0, num=81)
# All Particles
hist, bins = np.histogram(df.y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='grey', label='all particles')
# Pions
hist, bins = np.histogram(pions.y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', label='pions')
prev_hist = hist
# Nucleons
hist, bins = np.histogram(nucleons.y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', label='nucleons', bottom=prev_hist)
prev_hist += hist
# Kaons
hist, bins = np.histogram(kaons.y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', label='kaons', bottom=prev_hist)
ax[0].legend()
### transverse mass distribution
ax[1].set_title('Transverse Mass Distribution')
#fig.text(0.70, 0.04, 'mT / GeV', ha='center', va='center')
ax[1].set_xlabel('dN/dy')
#fig.text(0.50, 0.5, '1/mT^2 dN/dmT', ha='center', va='center', rotation='vertical')
ax[1].set_ylabel('1/mT^2 dN/dmT')
# We use the rapidity cut: |y| < 1.0
nucleons = nucleons[np.abs(nucleons.y) < 1.0]
pions = pions[np.abs(pions.y) < 1.0]
kaons = kaons[np.abs(kaons.y) < 1.0]
bins_mT = np.linspace(0.0, 4.0, num=81)
# Nucleons
hist, bins = np.histogram(nucleons.mT, weights=nucleons.mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', log=True, fill=True, label='nucleons')
# Pions
hist, bins = np.histogram(pions.mT, weights=pions.mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', log=True, fill=True, label='pions')
# Kaons
hist, bins = np.histogram(kaons.mT, weights=kaons.mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', log=True, fill=True, label='kaons')
ax[1].legend()
fig.show()
# Fitting the temperature
def decay(x, x_p, y_p, y0, x0):
return y0 + y_p * np.exp(-(x-x0)*x_p)
import pdb; pdb.set_trace()
hdf.close()
if __name__ == "__main__":
main()
|
{"/plot_urqmd.py": ["/read_urqmd.py"]}
|
15,414
|
pklaus/urqmd-analysis
|
refs/heads/master
|
/plot_urqmd.py
|
#!/usr/bin/env python
""" UrQMD File Reader """
from read_urqmd import F14_Reader
import argparse
import pickle
import logging
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
class Particle(object):
    """Read-only view of one particle record from an UrQMD .f14 line.

    *properties* is the whitespace-split list of column strings; accessors
    convert the relevant columns on demand.  The transverse mass is
    precomputed once because it is used heavily.
    """
    def __init__(self, properties):
        self._properties = properties
        # cache the expensive sqrt once
        self._mT = math.sqrt(self.m0 ** 2 + self.px ** 2 + self.py ** 2)
    def _col(self, index):
        """Column *index* converted to float."""
        return float(self._properties[index])
    @property
    def id(self):
        return int(self._properties[9])
    @property
    def time(self):
        return self._col(0)
    @property
    def E(self):
        return self._col(4)
    @property
    def px(self):
        return self._col(5)
    @property
    def py(self):
        return self._col(6)
    @property
    def pz(self):
        return self._col(7)
    @property
    def m0(self):
        return self._col(8)
    @property
    def mT(self):
        return self._mT
    @property
    def y(self):
        """Longitudinal rapidity."""
        return .5 * math.log((self.E + self.pz) / (self.E - self.pz))
def main():
parser = argparse.ArgumentParser(description='Read a config file.')
parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help="Must be of type .f14")
parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])
args = parser.parse_args()
logging.basicConfig(level=args.verbosity)
events = []
event_number = []
particle_number = []
particle_y = []
nucleon_y = []
nucleon_number = []
pion_number = []
kaon_number = []
pion_y = []
pion_mT = []
pion_mT_weights = []
kaon_y = []
kaon_mT = []
kaon_mT_weights = []
particle_mT = []
particle_mT_weights = []
nucleon_mT = []
nucleon_mT_weights = []
for event in F14_Reader(args.urqmd_file).get_events():
events.append(event)
event_number.append(event['id'])
particles = [Particle(particle_properties) for particle_properties in event['particle_properties']]
particle_y += [particle.y for particle in particles]
particle_mT += [particle.mT for particle in particles if abs(particle.y) < 1.0]
particle_mT_weights += [1/particle.mT**2 for particle in particles if abs(particle.y) < 1.0]
nucleons = [particle for particle in particles if particle.id == 1]
pions = [particle for particle in particles if particle.id == 101]
kaons = [particle for particle in particles if abs(particle.id) == 106]
nucleon_number.append(len(nucleons))
pion_number.append(len(pions))
kaon_number.append(len(kaons))
for nucleon in nucleons:
nucleon_y.append(nucleon.y)
if abs(nucleon.y) < 1.0:
nucleon_mT.append(nucleon.mT)
# weights for the histogram
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
nucleon_mT_weights.append(1/nucleon.mT**2)
for pion in pions:
pion_y.append(pion.y)
if abs(pion.y) < 1.0:
pion_mT.append(pion.mT)
# weights for the histogram
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
pion_mT_weights.append(1/pion.mT**2)
for kaon in kaons:
kaon_y.append(kaon.y)
if abs(kaon.y) < 1.0:
kaon_mT.append(kaon.mT)
# weights for the histogram
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
kaon_mT_weights.append(1/kaon.mT**2)
particle_number.append(len(particles))
logging.info("Event #{}: {} particles of which {} pions or kaons".format(event['id'], len(particles), len(pions)+len(kaons)))
df_physics = pd.DataFrame({'particles': particle_number, 'pions': pion_number, 'kaons': kaon_number}, index=event_number)
df_events = pd.DataFrame({'particles': particle_number, 'pions': pion_number, 'kaons': kaon_number}, index=event_number)
print(df_events.describe())
event_no = len(events)
fig, ax = plt.subplots(1,2, figsize=(10,4))
### rapidity distribution
ax[0].set_title('Rapidity Distribution')
#fig.ylabel('dN/dy')
#ax[0].xlabel('y / GeV')
bins_rapidity = 50
# All Particles
hist, bins = np.histogram(particle_y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='grey', label='all particles')
# Pions
hist, bins = np.histogram(pion_y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', label='pions')
# Nucleons
hist, bins = np.histogram(nucleon_y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', label='nucleons')
# Kaons
hist, bins = np.histogram(kaon_y, bins=bins_rapidity)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', label='kaons')
ax[0].legend()
### transverse mass distribution
ax[1].set_title('Transverse Mass Distribution')
#ax[1].ylabel('1/mT^2 dN/dmT')
#ax[1].xlabel('mT / GeV')
bins_mT = 80
# Nucleons
hist, bins = np.histogram(nucleon_mT, weights=nucleon_mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', log=True, fill=True, label='nucleons')
# Pions
hist, bins = np.histogram(pion_mT, weights=pion_mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', log=True, fill=True, label='pions')
# Kaons
hist, bins = np.histogram(kaon_mT, weights=kaon_mT_weights, bins=bins_mT)
#rescale histo:
for i in range(len(hist)):
bin_width = bins[1] - bins[0]
hist[i] = hist[i] / bin_width / event_no
ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', log=True, fill=True, label='kaons')
ax[1].legend()
fig.show()
import pdb; pdb.set_trace()
if __name__ == "__main__":
main()
|
{"/plot_urqmd.py": ["/read_urqmd.py"]}
|
15,415
|
pklaus/urqmd-analysis
|
refs/heads/master
|
/read_urqmd.py
|
#!/usr/bin/env python
""" UrQMD File Reader """
import argparse
import pickle
import logging
class F14_Reader(object):
    """Iterate over the events stored in an UrQMD .f14 output file."""
    def __init__(self, data_file):
        # data_file: any iterable of text lines (typically an open file)
        self.data_file = data_file
    def get_events(self):
        """Yield one dict per event: {'id': int, 'particle_properties': [...]}.

        A line starting with 'UQMD' opens a new event (and closes the
        previous one); 'event#' lines carry the event id; lines with exactly
        15 whitespace-separated fields are particle records.
        """
        event = None
        pending_header = False
        for raw_line in self.data_file:
            fields = raw_line.split()
            if not fields:
                continue
            if fields[0] == 'UQMD':
                pending_header = True
            if pending_header:
                # a new 'UQMD' banner finishes the previous event
                if event:
                    yield event
                event = {'particle_properties': []}
                pending_header = False
            if fields[0] == 'event#':
                event['id'] = int(fields[1])
            if len(fields) == 15:
                event['particle_properties'].append(fields)
        if event:
            yield event
def main():
    """CLI entry point: print a per-event particle-count summary of a .f14 file."""
    parser = argparse.ArgumentParser(description='Read a config file.')
    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help="Must be of type .f14")
    args = parser.parse_args()
    for event in F14_Reader(args.urqmd_file).get_events():
        print("Event #{} containing {} particles".format(event['id'], len(event['particle_properties'])))
if __name__ == "__main__":
    main()
|
{"/plot_urqmd.py": ["/read_urqmd.py"]}
|
15,416
|
pklaus/urqmd-analysis
|
refs/heads/master
|
/read_urqmd_pandas.py
|
#!/usr/bin/env python
""" UrQMD File Reader """
import pandas as pd
import numpy as np
import tables
import argparse
import logging
import warnings
import multiprocessing
import time
import queue
class F14_Reader(object):
    """Chunked reader that converts an UrQMD .f14 text file into pandas
    DataFrames of particle records.

    :param data_file: open text handle of the .f14 file (must expose .name,
        used for logging)
    :param add_event_columns: when True, add 'event_id' / 'event_ip' (impact
        parameter) columns derived from the 'UQMD' event header blocks
    :param renumber_event_ids: number events sequentially instead of taking
        the id from the header block
    """
    def __init__(self, data_file, add_event_columns=False, renumber_event_ids=True):
        self.data_file = data_file
        self.add_event_columns = add_event_columns
        self.renumber_event_ids = renumber_event_ids
    def get_dataframe(self):
        """Read the whole file into one DataFrame."""
        return pd.concat(list(self.iter_dataframes()), ignore_index=True)
    def iter_dataframes(self, chunksize=100000):
        """Yield cleaned DataFrames of at most *chunksize* raw lines each."""
        curr_event_id = 0
        curr_impact = 0.0
        names = ['r0', 'rx', 'ry', 'rz', 'p0', 'px', 'py', 'pz', 'm', 'ityp', '2i3', 'chg', 'lcl#', 'ncl', 'or']
        # pd.read_table / delim_whitespace are deprecated; read_csv with a
        # whitespace-regex separator is the supported equivalent.
        for df in pd.read_csv(self.data_file, names=names, sep=r'\s+', chunksize=chunksize):
            logging.info('Read {} lines from {}.'.format(len(df), self.data_file.name))
            # -- add additional event_* columns
            if self.add_event_columns:
                df['event_id'] = curr_event_id
                df['event_ip'] = curr_impact
                event_start = None
                for idx in df[df.r0 == 'UQMD'].index:
                    # remember the index where the event started
                    if event_start is None:  # was '== None'
                        event_start = idx
                        continue
                    # impact parameter sits 3 lines below the 'UQMD' banner
                    curr_impact = df.loc[event_start + 3, 'rx']
                    # set curr_event_id
                    if self.renumber_event_ids:
                        curr_event_id += 1
                    else:
                        curr_event_id = df.loc[event_start + 5, 'rx']
                    # tag the rows of the just-finished event
                    df.loc[event_start:idx, 'event_ip'] = curr_impact
                    df.loc[event_start:idx, 'event_id'] = curr_event_id
                    event_start = idx
                # rows after the last banner belong to the last event; a chunk
                # without any banner keeps the inherited id/impact values
                # (the original crashed here when event_start was None)
                if event_start is not None:
                    curr_impact = df.loc[event_start + 3, 'rx']
                    if self.renumber_event_ids:
                        curr_event_id += 1
                    else:
                        curr_event_id = df.loc[event_start + 5, 'rx']
                    df.loc[event_start:, 'event_id'] = curr_event_id
                    df.loc[event_start:, 'event_ip'] = curr_impact
            # -- end add event_* columns
            # keep only full particle rows (header lines leave 'or' empty)
            df = df[df['or'].notnull()]
            # DataFrame.convert_objects was removed from pandas; to_numeric
            # with errors='coerce' is the modern replacement.
            df = df.apply(pd.to_numeric, errors='coerce')
            df.dropna(how='any', inplace=True)
            if self.add_event_columns:
                df['event_id'] = df['event_id'].astype(np.uint32)
                df['event_ip'] = df['event_ip'].astype(np.float32)
            df['r0'] = df['r0'].astype(np.float32)
            df['rx'] = df['rx'].astype(np.float32)
            df['ry'] = df['ry'].astype(np.float32)
            df['rz'] = df['rz'].astype(np.float32)
            df['p0'] = df['p0'].astype(np.float32)
            df['px'] = df['px'].astype(np.float32)
            df['py'] = df['py'].astype(np.float32)
            df['pz'] = df['pz'].astype(np.float32)
            df['m'] = df['m'].astype(np.float32)
            df['ityp'] = df['ityp'].astype(np.int16)
            df['2i3'] = df['2i3'].astype(np.int8)
            df['chg'] = df['chg'].astype(np.int8)
            df['lcl#'] = df['lcl#'].astype(np.uint32)
            df['ncl'] = df['ncl'].astype(np.uint16)
            df['or'] = df['or'].astype(np.uint16)
            yield df
class HDF_Worker(multiprocessing.Process):
    """Background process that appends DataFrames from a queue to an HDF5
    store.  Terminates when the string sentinel 'EOF' arrives on the queue.
    """
    def __init__(self, h5_path, queue):
        # h5_path: target .h5 file; queue: multiprocessing.Queue of DataFrames
        self.h5_path = h5_path
        self.queue = queue
        # polling timeout for queue.get, in seconds
        self.block_period = .01
        super(HDF_Worker, self).__init__()
    def run(self):
        self.hdf = pd.HDFStore(self.h5_path)
        # silence the NaturalNameWarning raised for column names like 'lcl#',
        # restoring the original warning filters on exit
        original_warnings = list(warnings.filters)
        warnings.simplefilter('ignore', tables.NaturalNameWarning)
        while True:
            try:
                # get queue content
                qc = self.queue.get(timeout=self.block_period)
            except queue.Empty:
                continue
            if type(qc) == str and qc == 'EOF': break
            self.hdf.append('particles', qc, data_columns=True, index=False)
        self.hdf.close()
        warnings.filters = original_warnings
def main():
    """CLI entry point: stream a .f14 file into an HDF5 store.

    A background HDF_Worker process performs the writes while this process
    parses the next chunk.
    """
    parser = argparse.ArgumentParser(description='Read a config file.')
    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r', encoding='ascii'), help="Must be of type .f14")
    parser.add_argument('out_file', metavar='OUT_FILE', help='The HDF5 (.h5) file to store the information in')
    parser.add_argument('--no-event-columns', action='store_true', help="Don NOT include columns for the event number and event impact parameter.")
    parser.add_argument('--chunksize', type=int, default = 100000, help='The number of lines to read in one go.')
    parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='INFO', help="How verbose should the output be")
    args = parser.parse_args()
    logging.basicConfig(level=args.verbosity, format='%(asctime)s.%(msecs)d %(levelname)s %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    # NOTE: this local name shadows the module-level 'import queue' inside
    # main() only; HDF_Worker still sees the queue module.
    queue = multiprocessing.Queue()
    worker = HDF_Worker(args.out_file, queue)
    worker.start()
    for df in F14_Reader(args.urqmd_file, not args.no_event_columns).iter_dataframes(chunksize = args.chunksize):
        logging.debug("DataFrame ready to be written to file.")
        # crude back-pressure: give the writer a moment if it is behind
        if not queue.empty(): time.sleep(0.05)
        logging.debug("Queue empty. DataFrame will be put into write queue now.")
        queue.put(df.copy())
    queue.put('EOF')  # sentinel telling the worker to finish
    queue.close()
    queue.join_thread()
    worker.join()
if __name__ == "__main__":
    main()
|
{"/plot_urqmd.py": ["/read_urqmd.py"]}
|
15,437
|
NikitaZagor/PyPennet
|
refs/heads/master
|
/PyPennet/gui/baseFrame.py
|
from tkinter import Frame, Button, Menu, StringVar
from tkinter import BOTH, END, LEFT, RIGHT, RAISED, SUNKEN, TOP, X
from PyPennet.gui.baseEntry import BaseEntry
import PyPennet.utility.util as my_util
class BaseFrame:
    """Tkinter frame holding an N x N Punnett-square-like grid.

    The top row and left column are editable title entries; the inner
    N x N matrix shows the sorted combination of the corresponding row and
    column titles.  onClose(title) and onExecution(title, result) are
    callbacks supplied by the owning window.  (Original comments were in
    Russian; translated to English.)
    """
    def __init__(self, num, onClose, onExecution, parent=None):
        self.parent = parent
        self.onClose = onClose
        self.onExecution = onExecution
        if(parent is not None):
            parent.resizable(False, False)  # forbid resizing of the window
        self.countColumn = num
        self.entries = []  # this num x num list keeps references to the result entries
        for i in range(num):
            self.entries.append([])
        self.columnTitles = []  # references to the entries of the header row
        self.rowTitles = []  # references to the entries of the header column
        self.curResult = StringVar()
        self.initUI()
    def initUI(self):
        """Build the entry grid, the menu and the command buttons."""
        # container for the matrix of input fields and result cells
        self.entryFrame = Frame(self.parent, bd = 5, relief = SUNKEN)
        # build the menu
        self.makeMenu()
        # place the input fields of the top row and remember references to them
        for i in range(self.countColumn + 1):
            entr = BaseEntry(self.countColumn, self.entryFrame)
            if i != 0:
                self.columnTitles.append(entr)
            entr.grid(row = 0, column = i, padx = 10, pady = 10)
        # place the input fields of the first column and the result rows
        for i in range(self.countColumn):
            for j in range(self.countColumn + 1):
                entr = BaseEntry(self.countColumn, self.entryFrame)
                if j == 0:  # first field of the row (header column)
                    self.rowTitles.append(entr)
                else:  # a cell of the result matrix
                    self.entries[i].append(entr)
                    entr.setReadOnly()
                entr.grid(row=i+1, column=j, padx = 10, pady = 10)
        self.entryFrame.pack(side = TOP, expand = True, fill = BOTH)
        # place the command buttons
        self.frameBtn = Frame(self.parent, bd=5, relief=SUNKEN)
        self.btnExec = Button(self.frameBtn, text='Построить', command=self.onExec, bd=5, relief=RAISED)
        self.btnExec.pack(side = LEFT, padx = 10, pady = 10)
        self.btnExit = Button(self.frameBtn, text='Закрыть', command=self.onExit, bd=5, relief=RAISED)
        self.btnExit.pack(side = RIGHT, padx = 10, pady = 10)
        self.frameBtn.pack(side = TOP, expand = True, fill = X)
    def makeMenu(self):
        """Build the window menu (save / clear / reuse previous result)."""
        menubar = Menu(self.parent, tearoff=0)
        self.parent.config(menu=menubar)
        file_menu = Menu(menubar, tearoff=0)
        file_menu.add_command(label='Сохранить результат', command = self.saveResult)
        clear_menu = Menu(file_menu, tearoff=0)
        clear_menu.add_command(label='Очистить заголовоки колонок', command = self.clearRow)
        clear_menu.add_command(label='Очистить заголовки строк', command = self.clearColumn)
        clear_menu.add_command(label='Очистить результат', command = self.clearGrid)
        file_menu.add_cascade(label='Очистить данные', menu = clear_menu, underline = 0)
        use_menu = Menu(file_menu, tearoff=0)
        use_menu.add_command(label='Заполнить заголовоки колонок', command = lambda: self.fill_column_titles(''))
        use_menu.add_command(label='Заполнить заголовки строк', command = lambda: self.fill_row_titles(''))
        file_menu.add_cascade(label='Использовать результат',menu = use_menu, underline = 0)
        file_menu.add_separator()
        file_menu.add_command(label='Закрыть', command = self.onExit)
        menubar.add_cascade(label='Файл', menu=file_menu)
    def onExit(self):
        """Close the window -- but only while no result has been built yet.

        NOTE(review): when curResult is non-empty this silently does nothing,
        presumably to force the user to save first -- confirm this is intended.
        """
        title = self.parent.title()
        result = self.curResult.get()
        if len(result.strip()) == 0:
            self.onClose(title)
            self.parent.destroy()
    def onExec(self):  # fill the grid cells and build the overall result string
        self.curResult.set('')  # reset the current data
        tmp = ''
        for i in range(self.countColumn):  # walk the table rows
            for j in range(self.countColumn):  # walk the table columns
                valRow = self.rowTitles[i].get()
                valColumn = self.columnTitles[j].get()
                if len(valRow) != 0 and len(valColumn) != 0:
                    result = valRow + valColumn  # cell value is the row title joined
                    result = my_util.sortStr(result)  # with the column title, sorted
                    entr = self.entries[i][j]
                    entr['width'] = len(result) + 1
                    entr.setText(result)  # put the text into the result cell
                    tmp += result  # accumulate the overall result string
                    tmp += ','  # comma-separated
        tmp = tmp.rstrip(',')
        self.curResult.set(tmp)
        title = self.parent.title()
        self.onExecution(title, self.curResult.get())
    def saveResult(self):
        """Save column titles, row titles and the result string to a file."""
        if len(self.curResult.get()) > 0:
            result = []
            row = my_util.getStrFromEntries(self.columnTitles)
            result.append(row)
            column = my_util.getStrFromEntries(self.rowTitles)
            result.append(column)
            result.append(self.curResult.get())
            my_util.saveToFile(result)
    def fillEntries(self, entryLst, string):
        """Fill *entryLst* with the comma-separated values of *string*."""
        lst = [s.strip() for s in string.split(',')]
        length = len(lst)
        # clear the cells
        for i in range(self.countColumn):
            entryLst[i].delete(0, END)
        # insert the new values (excess values are silently dropped)
        if length > self.countColumn:
            for i in range(self.countColumn):
                entryLst[i].insert(0, lst[i])
        else:
            for i in range(length):
                entryLst[i].insert(0, lst[i])
    def fill_column_titles(self, string):
        """Fill the header row; an empty *string* loads the previous result from file."""
        if len(string) == 0:
            prevResult = my_util.getFromFile()
            if prevResult != None:
                self.fillEntries(self.columnTitles, prevResult)
        else:
            self.fillEntries(self.columnTitles, string)
    def fill_row_titles(self, string):
        """Fill the header column; an empty *string* loads the previous result from file."""
        if len(string) == 0:
            prevResult = my_util.getFromFile()
            if prevResult != None:
                self.fillEntries(self.rowTitles, prevResult)
        else:
            self.fillEntries(self.rowTitles, string)
    def clearRow(self):
        """Clear all header-row (column title) entries."""
        for entr in self.columnTitles:
            entr.setText('')
    def clearColumn(self):
        """Clear all header-column (row title) entries."""
        for entr in self.rowTitles:
            entr.setText('')
    def clearGrid(self):
        """Clear every cell of the result matrix."""
        for i in range(self.countColumn):
            for j in range(self.countColumn):
                self.entries[i][j].setText('')
if __name__ == '__main__':
    # Manual smoke test: a button opens a demo window with a BaseFrame of
    # the size typed into the entry.
    from tkinter import Toplevel, Tk, Label, Entry
    def showTopLevel(parent, entr):
        """Open a demo toplevel window holding a BaseFrame of the entered size."""
        window = Toplevel(parent)
        size = entr.get()
        dim = int(size)
        # BaseFrame requires (num, onClose, onExecution, parent); the original
        # demo passed only (dim, window) and raised a TypeError.
        baseFrame = BaseFrame(dim, lambda title: None, lambda title, data: None, window)
    root = Tk()
    btn = Button(root, text = 'Show', command = lambda: showTopLevel(root, entr))
    btn.grid(row = 0, column = 0, padx = 10, pady = 10)
    lbl = Label(text = 'Размер')
    lbl.grid(row = 0, column = 1, padx = 10, pady = 10)
    entr = Entry(root, width = 4)
    entr.grid(row = 0, column = 2,padx = 10, pady = 10)
    root.mainloop()
|
{"/PyPennet/gui/baseFrame.py": ["/PyPennet/gui/baseEntry.py", "/PyPennet/utility/util.py"], "/PyPennet/mainWindow.py": ["/PyPennet/gui/listwindow.py", "/PyPennet/gui/gridparams.py", "/PyPennet/gui/baseFrame.py", "/PyPennet/gui/showresult.py", "/PyPennet/gui/about_dlg.py"], "/PyPennet/gui/baseEntry.py": ["/PyPennet/utility/util.py", "/PyPennet/utility/constants.py"], "/PyPennet/gui/gridparams.py": ["/PyPennet/utility/util.py", "/PyPennet/gui/prev_perult.py"]}
|
15,438
|
NikitaZagor/PyPennet
|
refs/heads/master
|
/PyPennet/gui/prev_perult.py
|
from tkinter import Toplevel, Frame, Button, Listbox, Scrollbar
from tkinter import SINGLE, END, LEFT, RIGHT, RAISED, RIDGE, TOP, BOTH, X, Y, SUNKEN
class PrevResult(Toplevel):
    """Modal dialog listing previously created grids for reuse.

    :param grids: dict keyed by grid title
    :param selected: StringVar receiving the chosen key (set to None --
        i.e. the string 'None' -- on cancel, as before)
    :param parent: owning Tk widget
    """
    def __init__(self, grids, selected, parent=None):
        Toplevel.__init__(self, parent)
        self.parent = parent
        self.title('Параметры новой решетки')
        # kept under a private name: the original stored the data as
        # self.grid, which shadowed Tkinter's Widget.grid() method
        self._grids = grids
        self.select = selected
        self.initUI()
    def initUI(self):
        """Build the scrollable listbox of grid titles and the OK/Cancel buttons."""
        main_frame = Frame(self, bd=5, relief=RIDGE)
        main_frame.pack(side=TOP, expand=True, fill=BOTH)
        listbox = Listbox(main_frame, width=35, height=4, selectmode=SINGLE, bd=5, relief=RIDGE)
        listbox.bind('<<ListboxSelect>>', self.selected)
        scrbary = Scrollbar(main_frame, orient='vertical')
        scrbarx = Scrollbar(main_frame, orient='horizontal')
        scrbary.config(command=listbox.yview, relief=SUNKEN)
        scrbarx.config(command=listbox.xview, relief=SUNKEN)
        listbox.config(yscrollcommand=scrbary.set, xscrollcommand=scrbarx.set)
        scrbary.pack(side=RIGHT, fill=Y)
        scrbarx.pack(side='bottom', fill=X)
        for key in self._grids.keys():
            listbox.insert(END, key)
        listbox.pack(side=TOP, expand=True, fill=BOTH)
        frame_buttons = Frame(self, bd=5, relief=RIDGE)
        frame_buttons.pack(side=TOP, expand=True, fill=X)
        btn_ok = Button(frame_buttons, text='Выбрать ', bd=5, relief=RAISED, command=self.on_ok)
        btn_ok.pack(side=LEFT, padx=5, pady=5)
        btn_cancel = Button(frame_buttons, text='Отменить', bd=5, relief=RAISED, command=self.on_cancel)
        btn_cancel.pack(side=RIGHT, padx=5, pady=5)
    def selected(self, event):
        """Listbox selection callback: remember the chosen key in self.select."""
        widget = event.widget
        items = widget.curselection()
        if not items:  # <<ListboxSelect>> can fire with an empty selection
            return
        self.select.set(widget.get(items[0]))
    def on_ok(self):
        """Accept the current selection and close the dialog."""
        self.destroy()
    def on_cancel(self):
        """Discard the selection and close the dialog."""
        self.select.set(None)
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: show the dialog with two fake grids, print the pick.
    from tkinter import Tk, StringVar
    root = Tk()
    select = StringVar()
    grids = {'Test1': [2, 'Test1', 'R', 'r', 'Y','y','RY,Ry,rY,ry'], 'Test2': [2, 'Test1', 'R', 'r', 'Y','y','RY,Ry,rY,ry']}
    prev_res = PrevResult(grids, select, root)
    prev_res.pack_slaves()
    prev_res.focus_get()
    prev_res.grab_set()
    prev_res.wait_window()
    print(select.get())
    root.mainloop()
|
{"/PyPennet/gui/baseFrame.py": ["/PyPennet/gui/baseEntry.py", "/PyPennet/utility/util.py"], "/PyPennet/mainWindow.py": ["/PyPennet/gui/listwindow.py", "/PyPennet/gui/gridparams.py", "/PyPennet/gui/baseFrame.py", "/PyPennet/gui/showresult.py", "/PyPennet/gui/about_dlg.py"], "/PyPennet/gui/baseEntry.py": ["/PyPennet/utility/util.py", "/PyPennet/utility/constants.py"], "/PyPennet/gui/gridparams.py": ["/PyPennet/utility/util.py", "/PyPennet/gui/prev_perult.py"]}
|
15,439
|
NikitaZagor/PyPennet
|
refs/heads/master
|
/PyPennet/mainWindow.py
|
from tkinter import Tk, Frame, Button, Menu, Toplevel
from tkinter import BOTH, RIGHT, LEFT, RAISED, RIDGE, TOP, X
from tkinter import messagebox
from tkinter import filedialog
from tkinter import messagebox as mb
import pickle
import webbrowser
from PyPennet.gui.listwindow import ListWindow
from PyPennet.gui.gridparams import GridParams
from PyPennet.gui.baseFrame import BaseFrame
from PyPennet.gui.showresult import ShowResult
from PyPennet.gui.about_dlg import AboutDlg
from PyPennet.utility import constants as const
# Глобальные переменные
root = None # Главное окно программы
listbox = None # Listbox со списком открытых окон (Идентификатор и заголовок окна)
show_result = None # Контейнер резултатов обработки решетки
dic_grids = {} # Словарь с параметрами созданных решеток. Ключ - заголовок окна
def makeMenu(root):
    """Build the main-window menu bar: a File menu (save/load/quit) and a
    Help menu (help page / about dialog)."""
    main_menu = Menu(root)
    root.config(menu=main_menu)
    file_menu = Menu(main_menu, tearoff=0)
    file_menu.add_command(label='Сохранить в файл', command=save_to_file)
    file_menu.add_command(label='Загрузить из файла', command=from_file)
    file_menu.add_separator()
    file_menu.add_command(label='Закрыть', command=root.quit)
    main_menu.add_cascade(label='Файл', menu=file_menu)
    help_menu = Menu(main_menu, tearoff=0)
    help_menu.add_command(label='Справка', command=help_app)
    help_menu.add_command(label='О Программе', command=about_app)
    main_menu.add_cascade(label='Справка', menu=help_menu)
def save_to_file():
    """Pickle dic_grids into a file chosen by the user.

    Does nothing when the save dialog is cancelled (the original passed the
    empty filename straight to open() and crashed).
    """
    global dic_grids
    fout = filedialog.asksaveasfilename()
    if not fout:  # dialog cancelled
        return
    with open(fout, 'wb') as f:
        pickle.dump(dic_grids, f, 3)
def from_file():
    """Load dic_grids from a user-selected pickle file and refresh the listbox.

    The current data is replaced only after a file was actually chosen and
    read; the original cleared dic_grids before the dialog, destroying the
    data when the user cancelled.
    NOTE(review): pickle.load on a user-supplied file executes arbitrary
    code -- acceptable only for trusted local files.
    """
    global dic_grids
    fin = filedialog.askopenfilename()
    if not fin:  # dialog cancelled -- keep current data intact
        return
    with open(fin, 'rb') as f:
        loaded = pickle.load(f, encoding='UTF-8')
    # swap in the loaded grids, keeping the same dict object alive
    dic_grids.clear()
    dic_grids.update(loaded)
    global listbox
    listbox.clear()
    for key in dic_grids.keys():
        listbox.insertData(key)
def help_app():
    """Open the bundled help page in the default web browser."""
    webbrowser.open_new_tab("index.html")
def about_app():
    """Show the About dialog over the main window."""
    global root
    dlg = AboutDlg(root)
# When an entry is selected in the list window, check whether the grid has
# already been built: if it has, show it in the results pane;
# otherwise report an error to the user.
def showSelected(selected):
    """Show the stored result of the grid named *selected* (a window title)."""
    global dic_grids
    lst = dic_grids[selected]
    if len(lst[const.pos_data]) == 0:
        messagebox.showerror('Ошибка', 'Решетка ' + selected + ' еще не сформирована')
        return
    global show_result
    show_result.clearData()
    show_result.insertData(lst)
# When a grid window is closed, remove every trace of it
def closeGrid(string):
    """Forget the grid titled *string*: dict entry, listbox row, toplevel window."""
    global dic_grids
    del (dic_grids[string])  # remove the entry from the dictionary
    global listbox
    listbox.removeData(string)  # remove the row from the Listbox
    global root
    for item in root.winfo_children():  # walk the children of the main window
        cls = item.winfo_class()  # class of the current child
        if cls == 'Toplevel':  # only toplevel windows are of interest
            title = item.title()  # read its title
            if title == string:  # matching title -> destroy the window
                item.destroy()
                break
    global show_result
    show_result.clearData()
# When a grid is built, store the result string in its dictionary entry
def execGrid(title, data):
    """Record *data* as the built result of the grid titled *title*."""
    global dic_grids
    # NOTE(review): index 4 is the result slot appended in addNewGrid;
    # presumably equal to const.pos_data used in showSelected -- confirm.
    dic_grids[title][4] = data
# Put the window title into the window list and its parameters into dic_grids
def addNewGrid(root, listbox):
    """Ask the user for new-grid parameters and open a BaseFrame window for it."""
    listParams = []  # will hold: grid size; window title; column titles; row titles
    global dic_grids
    gridParams = GridParams(listParams, dic_grids, root)  # modal dialog collecting the parameters
    gridParams.focus_set()  # focus the dialog
    gridParams.grab_set()  # keep the focus on it
    gridParams.wait_window()  # wait until the dialog is done
    # if listParams is non-empty afterwards, create a window with the grid
    if len(listParams) > 1:
        listParams.append('')  # empty slot for the future result string
        result = listbox.insertData(listParams[const.pos_name])  # add the title to the Listbox
        if not result:
            return
        window = Toplevel(root)  # toplevel window hosting the new grid
        size = listParams[const.pos_size]
        # build the grid and place it in the toplevel window
        window.title(listParams[const.pos_name])  # set the window title
        baseFrame = BaseFrame(int(size), closeGrid, execGrid, window)
        if len(listParams[const.pos_columns]) > 0:
            # column titles were supplied -- preload them into the grid
            baseFrame.fill_column_titles(listParams[const.pos_columns])
        if len(listParams[const.pos_rows]) > 0:
            baseFrame.fill_row_titles(listParams[const.pos_rows])  # same for the row titles
        dic_grids[listParams[const.pos_name]] = listParams  # remember the grid parameters
# Delete the grid currently selected in the Listbox.
def deleteGrid(root, listbox):
    """Remove the selected grid from the listbox, the dict and the screen."""
    global show_result, dic_grids
    show_result.clearData()
    # The selected Listbox string doubles as the grid window's title.
    title = listbox.getSelectedItem()
    if title is None:
        return
    listbox.removeData(title)
    del dic_grids[title]
    # Destroy the matching top-level window, if one is open.
    for child in root.winfo_children():
        if child.winfo_class() == 'Toplevel' and child.title() == title:
            child.destroy()
            break
def main():
    """Build the main application window and run the tkinter event loop."""
    global root
    root = Tk()
    root.title('Решетки Пеннета для законов Менделя')
    makeMenu(root)
    # Frame container for the listbox of created grids.
    frame_listbox = Frame(root, bd=5, relief=RIDGE)
    frame_listbox.pack(side=TOP, expand=True, fill=BOTH)
    global listbox
    listbox = ListWindow(showSelected, frame_listbox)
    listbox.pack(side=TOP)
    # Container for the "Add" / "Delete" buttons.
    frame_btn = Frame(root, bd=5, relief=RIDGE)
    frame_btn.pack(side=TOP, expand=True, fill=X)
    btnAdd = Button(frame_btn, text='Добавить', bd=5, relief=RAISED, command=lambda: addNewGrid(root, listbox))
    btnAdd.pack(side=LEFT, padx=5, pady=5)
    btnDelete = Button(frame_btn, text='Удалить', bd=5, relief=RAISED, command=lambda: deleteGrid(root, listbox))
    btnDelete.pack(side=RIGHT)
    # Result pane that displays the selected grid.
    global show_result
    show_result = ShowResult(root)
    show_result.pack(side=TOP)
    status_bar = Frame(root, bd=5, relief=RIDGE)
    status_bar.pack(side=TOP, expand=True, fill=X)
    btn_quit = Button(status_bar, text='Закрыть', command=root.quit, bd=5, relief=RAISED)
    btn_quit.pack(side=RIGHT, padx=5, pady=5)
    try:
        root.iconbitmap('app.ico')
    except TclError:
        # Narrowed from a bare `except:` — iconbitmap raises TclError when
        # the icon file is missing. Also unified on `messagebox` (the alias
        # `mb` was inconsistent with the rest of this file, e.g. showSelected).
        messagebox.showerror("Ошибка", "Отсутствует файл app.ico")
    root.mainloop()
# Script entry point: start the GUI only when executed directly.
if __name__ == '__main__':
    main()
|
{"/PyPennet/gui/baseFrame.py": ["/PyPennet/gui/baseEntry.py", "/PyPennet/utility/util.py"], "/PyPennet/mainWindow.py": ["/PyPennet/gui/listwindow.py", "/PyPennet/gui/gridparams.py", "/PyPennet/gui/baseFrame.py", "/PyPennet/gui/showresult.py", "/PyPennet/gui/about_dlg.py"], "/PyPennet/gui/baseEntry.py": ["/PyPennet/utility/util.py", "/PyPennet/utility/constants.py"], "/PyPennet/gui/gridparams.py": ["/PyPennet/utility/util.py", "/PyPennet/gui/prev_perult.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.