index
int64
repo_name
string
branch_name
string
path
string
content
string
import_graph
string
57,703
TheoKlein/snake-core
refs/heads/master
/snake/utils/__init__.py
"""The Utils Module""" from snake.utils.file_storage import FileStorage # noqa
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,704
TheoKlein/snake-core
refs/heads/master
/snake/core/snake_handler.py
"""The snake request handling module.

Basically this is the module that contains all commonality for Tornado based
aspects of snake.

Attributes:
    TAIL_SIZE (int): The size of the sliding window for the stream based
        request handler.
"""

import json
import logging
import tempfile
from datetime import datetime
from os import path

from bson import objectid
from tornado import escape
from tornado import httputil
from tornado import web
from webargs import tornadoparser

from snake import error
from snake.config import constants
from snake.config import snake_config

app_log = logging.getLogger("tornado.application")  # pylint: disable=invalid-name
gen_log = logging.getLogger("tornado.general")  # pylint: disable=invalid-name

# Overlap (in bytes) kept between successive chunks so a multipart boundary
# split across a chunk edge is still found.
TAIL_SIZE = 50


class JSONEncoder(json.JSONEncoder):
    """Extends `JSONEncoder`.

    Define some additional encoding techniques for types that mongo/snake
    responses commonly contain (ObjectId, bytes, datetime).
    """

    def default(self, o):  # pylint: disable=method-hidden
        """Extends `default`.

        This handles some instances that need a bit of casting in order to
        encode.
        """
        if isinstance(o, objectid.ObjectId):
            return str(o)
        if isinstance(o, bytes):
            # NOTE(review): str(bytes) yields "b'...'" reprs rather than a
            # decoded string — presumably intentional here, but worth confirming.
            return str(o)
        if isinstance(o, datetime):
            return o.isoformat()
        return json.JSONEncoder.default(self, o)


class SnakeHandler(web.RequestHandler):  # pylint: disable=abstract-method
    """Extends `RequestHandler`.

    Defines additional methods and overrides to suit snake: CORS headers,
    mongo filter/args extraction from query strings, and uniform
    success/fail/error JSON bodies.
    """

    @staticmethod
    def _jsonify(data):
        # Serialize with the snake-aware encoder; sorted keys for stable output.
        return JSONEncoder(sort_keys=True, indent=True).encode(data)

    def _write_error_generic(self, status_code):
        # Fallback error body used when no richer error information is available.
        self.set_status(status_code)
        self.write({
            "status": "error",
            "message": "snake has encountered an Error!"
        })

    def set_default_headers(self):
        # Permissive CORS headers applied to every response.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "access-control-allow-origin, x-requested-with, content-type")
        self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS, PATCH, POST')

    def options(self, *args, **kwargs):
        # NOTE: We only want to push CORS stuff so we don't care about args
        self.set_status(204)
        self.finish()

    def create_filter(self, args, operator):
        """Create a mongo filter.

        Parses a list of request arguments to create a filter for use with
        mongodb. Arguments of the form ``filter[key]=value`` are collected;
        values containing ``$regex`` are json-decoded so the regex reaches
        mongo as an object rather than a string.

        Args:
            args (list): A list of request arguments.
            operator (str): The type of filter ('or' combines with $or,
                anything else with $and).

        Returns:
            dict: The mongodb filter, or None if no filter args were present.
        """
        _filter = []
        for arg in args:
            if 'filter' in arg:
                try:
                    # Extract 'key' from 'filter[key]'.
                    f_key = arg.split('[')[1].split(']')[0]
                    for f_arg in self.get_arguments(arg):
                        if '$regex' in f_arg:
                            _filter += [{f_key: escape.json_decode(f_arg)}]
                        else:
                            _filter += [{f_key: f_arg}]
                except Exception:  # noqa
                    # Malformed filter arguments are silently skipped (best effort).
                    pass
        if not _filter:
            return None
        elif len(_filter) == 1:
            _filter = _filter[0]
        else:
            if operator == 'or':
                _filter = {"$or": _filter}
            else:
                _filter = {"$and": _filter}
        return _filter

    def create_args(self, args):
        """Collect ``args[key]=value`` request arguments into a dict.

        Only the first value for each key is kept.
        """
        _args = {}
        for arg in args:
            if 'args' == arg[:4]:
                a_key = arg.split('[')[1].split(']')[0]
                _args[a_key] = self.get_arguments(arg)[0]
        return _args

    @staticmethod
    def json_decode(data):
        """Decode json.

        Decodes json but performs escaping first.

        Args:
            data (str): A json string.

        Returns:
            obj: The decoded json.
        """
        return json.loads(escape.to_basestring(data))

    def jsonify(self, data):
        """Jsonify.

        This creates the response JSON in the format that snake wants
        ({"status": "success", "data": ...}) and writes it.

        Args:
            data (obj): The data to turn into json.
        """
        resp = {
            "status": "success",
            "data": data
        }
        self.write(JSONEncoder(sort_keys=True, indent=True).encode(resp))

    def write_error(self, status_code, **kwargs):
        """Write an error response.

        This handles the writing and formatting of response errors. webargs
        validation failures become "fail" bodies; SnakeErrors become "error"
        bodies (honouring their own status_code); anything else gets the
        generic error body.

        Args:
            status_code (int): The error code.
            **kwargs: Arbitrary keyword arguments.
        """
        if 'exc_info' not in kwargs:
            self._write_error_generic(status_code)
            return
        _, err, _ = kwargs['exc_info']
        if isinstance(err, tornadoparser.HTTPError):  # Handle webargs/marshmallow fails
            self.write(self._jsonify({
                "status": "fail",
                "message": err.messages
            }))
            return
        if not isinstance(err, error.SnakeError):
            self._write_error_generic(status_code)
            return
        if err.status_code is None:
            self.set_status(status_code)
        else:
            self.set_status(err.status_code)
        self.write(self._jsonify({
            "status": "error",
            "message": err.message
        }))
        return

    def write_warning(self, message, status_code=400, data=None):
        """Write a warning response.

        This handles the writing and formatting of response warnings. The
        warning (plus any data) is also logged via the application logger.

        Args:
            message (str): The warning message
            status_code (int, optional): The status code. Defaults to 400
            data (obj): Additional data for the warning.
        """
        body = {
            "status": "error",
            "message": message
        }
        _message = str(message)
        if data:
            _message += '\n'
            if isinstance(data, dict):
                _message += self._jsonify(data)
            elif isinstance(data, list):
                _message += self._jsonify(data)
            else:
                _message += str(data)
        app_log.warning(_message)
        if data:
            body['data'] = data
        self.set_status(status_code)
        self.write(self._jsonify(body))


class DefaultHandler(SnakeHandler):  # pylint: disable=abstract-method
    """Extend `SnakeHandler`.

    Just the basic default request class for snake that will return a 404
    when an unknown route is requested.
    """

    async def prepare(self):
        self.write_warning({'api_version': constants.API_VERSION}, 404)
        self.finish()


@web.stream_request_body
class StreamHandler(SnakeHandler):
    """Extend `SnakeHandler`.

    This is the stream handler, it is used to handle large objects without
    eating up all the memory as a traditional Handler would. As a result this
    is quite a complicated class. It will live parse the data as it is being
    received and extract any files to disk replacing that data with the paths
    of the extracted files.

    Attributes:
        bytes_read (int): Total number of bytes read.
        content_length (int): Total length of content.
        content_type (str): Type of the content being received.
        data (bytes): The request data but with the file paths instead of
            the files.
        error (obj): Any error encountered.
        stream (:obj:`Stream`): The streaming state.
    """

    class Stream():  # pylint: disable=too-few-public-methods
        """The stream state.

        This is used to store the state of the streaming data.

        Attributes:
            boundary (str): The request boundary. Used to determine the
                metadata from the content.
            header (bytes): The header.
            file_count (int): Number of files in the request.
            state (int): The state of the state machine used for live parsing.
            tail (bytes): The tail of the previous chunk.
            working_dir (obj): The `TemporaryDirectory` where the data is
                being saved to.
        """

        def __init__(self):
            self.boundary = None
            self.header = bytes()
            self.file_count = 0
            self.state = 0
            self.tail = bytes()
            self.working_dir = None

    def initialize(self):
        """Extend `initialize`.

        Works out what sort of request we have and how to parse it. Streaming
        may not actually be required in which case it will not be used.
        """
        self.bytes_read = 0
        self.content_length = 0
        self.content_type = ''
        self.data = bytes()
        self.error = None
        self.stream = None
        if self.request.headers and 'Content-Encoding' in self.request.headers:
            # Encoded bodies are not supported; fall through with no stream.
            gen_log.warning("Unsupported Content-Encoding: %s", self.request.headers['Content-Encoding'])
            return
        if self.request.headers and 'Content-Type' in self.request.headers:
            self.content_length = int(self.request.headers['Content-Length']) if 'Content-Length' in self.request.headers else 0
            self.content_type = self.request.headers['Content-Type']
            if self.content_type.startswith("application/x-www-form-urlencoded"):
                return
            elif self.content_type.startswith("multipart/form-data"):
                # If we have a POST that is multipart/form-data we will stream any file
                # content to disk. This will prevent excessive RAM usage. Clearly we
                # will need to keep tabs on the overall data size or someone could
                # still use too much RAM!
                self.stream = self.Stream()
                boundary = None
                fields = self.content_type.split(";")
                for field in fields:
                    k, _, v = field.strip().partition("=")
                    if k == "boundary" and v:
                        boundary = bytes(v, 'utf8')
                if not boundary:
                    raise error.SnakeError('Content boundary not found')
                # Strip optional quoting around the boundary value.
                if boundary.startswith(b'"') and boundary.endswith(b'"'):
                    boundary = boundary[1:-1]
                self.stream.boundary = boundary
                # Extracted files live under the configured cache directory.
                self.stream.working_dir = tempfile.TemporaryDirectory(dir=path.abspath(path.expanduser(snake_config['cache_dir'])))
            else:
                self.error = error.SnakeError('Unsupported Content-Type: %s' % self.content_type)

    # NOTE: We are live parsing the request body here using a overlapping
    # sliding window! We need to make sure that this has no errors or we are
    # gonna ingest files incorrectly!!! If anything bad happens we are ducked!
    #
    # State machine: 0 = seeking boundary, 1 = reading part header,
    # 2 = streaming file content to disk, 3 = passing other content through.
    def data_received(self, chunk):  # pylint: disable=too-many-branches, too-many-statements
        if self.error:
            raise self.error  # pylint: disable=raising-bad-type
        self.bytes_read += len(chunk)
        if len(self.data) > 104857600:  # Ensure the someone is not trying to fill RAM, 100MB
            raise error.SnakeError('Content-Length too large (truncated)')
        if self.stream:  # Cache files to disk
            # Prepend the previous chunk's tail so boundaries that straddle
            # chunk edges are still detected.
            chunk = self.stream.tail + chunk
            chunk_len = len(chunk)
            i = 0
            while i < chunk_len:
                if self.stream.state == 0:  # Find start of header
                    soh = chunk.find(b'--' + self.stream.boundary, i)
                    if soh != -1:
                        self.data += chunk[soh:soh + len(self.stream.boundary) + 4]
                        i = soh + len(self.stream.boundary) + 4
                        self.stream.state = 1
                        continue
                elif self.stream.state == 1:  # Find end of header
                    eoh = chunk.find(b'\r\n\r\n', i)
                    if eoh != -1:
                        self.stream.header += chunk[i:eoh + 4]
                        i = eoh + 4
                        if b'filename=' in self.stream.header:  # We have a file
                            self.stream.state = 2
                        else:
                            self.stream.state = 3
                        self.data += self.stream.header
                        self.stream.header = bytes()
                        continue
                elif self.stream.state == 2:  # Handle file based content
                    soh = chunk.find(b'--' + self.stream.boundary, i)
                    if soh != -1:
                        # Whole remainder of the file is in this chunk; flush it
                        # to disk and substitute the path into the data.
                        f_path = path.join(self.stream.working_dir.name, str(self.stream.file_count))
                        with open(f_path, 'a+b') as f:
                            f.write(chunk[i:soh - 2])  # -2 drops the extra '\r\n'
                        self.data += bytes(f_path + '\r\n', 'utf-8')
                        self.stream.file_count += 1
                        i = soh
                        self.stream.state = 0
                        continue
                elif self.stream.state == 3:  # Handle all other content
                    soh = chunk.find(b'--' + self.stream.boundary, i)
                    if soh != -1:
                        self.data += chunk[i:soh]
                        i = soh
                        self.stream.state = 0
                        continue
                # Handle the overlapping tail: no terminator found in this
                # chunk, so consume all but TAIL_SIZE bytes and carry the rest.
                if i + TAIL_SIZE < chunk_len:
                    if self.stream.state == 2:
                        f_path = path.join(self.stream.working_dir.name, str(self.stream.file_count))
                        with open(f_path, 'a+b') as f:
                            f.write(chunk[i:chunk_len - TAIL_SIZE])
                    elif self.stream.state == 1:
                        self.stream.header += chunk[i:chunk_len - TAIL_SIZE]
                    else:
                        self.data += chunk[i:chunk_len - TAIL_SIZE]
                    self.stream.tail = chunk[chunk_len - TAIL_SIZE:]
                    i += chunk_len
                else:
                    # Chunk smaller than the window; defer it entirely.
                    self.stream.tail = chunk[i:]
                    i += chunk_len
        else:  # Otherwise be normal
            self.data += chunk
        if self.bytes_read >= self.content_length:  # Finished, parse the new content
            httputil.parse_body_arguments(self.content_type, self.data, self.request.body_arguments, self.request.files, headers=None)
            for k, v in self.request.body_arguments.items():
                self.request.arguments.setdefault(k, []).extend(v)

    def on_finish(self):
        # Remove any files extracted to the temporary working directory.
        if self.stream:
            self.stream.working_dir.cleanup()
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,705
TheoKlein/snake-core
refs/heads/master
/snake/schema.py
"""The schema module.

This contains the schema used throughout snake.
"""

import copy

import marshmallow

from snake import enums
from snake import fields


class Schema(marshmallow.Schema):
    """Extends `Schema`.

    This allows for dynamic creation of a schema which is needed to validate
    arguments within scales.
    """

    def __init__(self, *args, **kwargs):
        # Additional fields may be supplied at construction time via the
        # 'fields' keyword; they are deep-copied so the caller's field
        # objects are never mutated by binding.
        self.additional_fields = kwargs.pop('fields', None)
        super().__init__(*args, **kwargs)
        if self.additional_fields:
            self.declared_fields.update(copy.deepcopy(self.additional_fields))  # pylint: disable=no-member
            # Rebind so the injected fields take part in (de)serialisation.
            self._update_fields(many=self.many)


class CommandSchema(Schema):
    """The command schema.

    This is the base schema for the command document stored within the mongo
    database.

    Note:
        Scales are allowed to embed additional information into this document
        but it will be ignored.
    """

    _id = fields.ObjectId(load_only=True)
    _output_id = fields.ObjectId(load_only=True, missing=None)  # GridFS

    sha256_digest = fields.Str(required=True)
    scale = fields.Str(required=True)
    command = fields.Str(required=True)
    args = fields.Dict(default={}, missing={})
    asynchronous = fields.Boolean(default=False)
    timeout = fields.Int(default=600)  # seconds

    format = fields.Str(type=enums.Format, missing=enums.Format.JSON)
    output = fields.Raw(dump_only=True, default=None, missing=None)
    status = fields.Str(type=enums.Status, missing=enums.Status.PENDING, default=enums.Status.PENDING)

    timestamp = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")
    start_time = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")
    end_time = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")


class FileSchema(Schema):
    """The file schema.

    This is the schema for the file document stored within the mongo
    database.
    """

    # Shared validator: rejects empty strings.
    not_blank = marshmallow.validate.Length(min=1, error='Field cannot be blank')

    _id = fields.ObjectId(load_only=True)

    file_type = fields.Enum(required=True, type=enums.FileType, missing=enums.FileType.FILE)
    name = fields.Str(required=True, validate=not_blank)
    sha256_digest = fields.Str()

    description = fields.Str()
    tags = fields.Str()

    magic = fields.Str()
    mime = fields.Str()
    size = fields.Int()
    timestamp = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")

    submission_type = fields.Str(validate=not_blank, default="unknown")

    # Relationship maps: key -> list of related sha256 digests.
    parents = fields.Dict(values=fields.List(fields.Str(validate=not_blank)), keys=fields.Str(validate=not_blank), default={})
    children = fields.Dict(values=fields.List(fields.Str(validate=not_blank)), keys=fields.Str(validate=not_blank), default={})


class NoteSchema(Schema):
    """The note schema.

    This is the schema for the note document stored within the mongo
    database.
    """

    _id = fields.ObjectId(load_only=True)

    sha256_digest = fields.Str(required=True)
    body = fields.Str(required=True)

    timestamp = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")
    updated_time = fields.DateTime("%Y-%m-%dT%H:%M:%S.%f")
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,706
TheoKlein/snake-core
refs/heads/master
/tests/core/test_celery.py
# pylint: disable=missing-docstring

import json

from celery.contrib.testing import mocks
import pytest

from snake import error
from snake import schema
from snake.core import celery

# pylint: disable=invalid-name


def test_snake_request_kill_child_processes(mocker):
    """ Test SnakeRequest: kill_child_processes """
    # Fake os.kill that behaves as if the process is already dead.
    def fake_kill(*args, **kwargs):  # pylint: disable=unused-argument
        raise OSError

    # Fake subprocess.run returning a ps-style listing with child pid 1234.
    def fake_ps(*args, **kwargs):  # pylint: disable=unused-argument
        class FakePs:  # pylint: disable=too-few-public-methods
            returncode = 0
            stdout = b'1234\n'
        return FakePs()

    mocker.patch('time.sleep')
    SnakeRequest = celery.SnakeRequest

    # Test return failed ps command
    mocker.patch('subprocess.run')
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.kill_child_processes(1234)
    mocker.patch('subprocess.run')

    # Test a match and fake kill
    mocker.patch('os.kill')
    mocker.patch('subprocess.run', fake_ps)
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.kill_child_processes(1234)
    mocker.patch('os.kill')
    mocker.patch('subprocess.run')

    # Test a no match
    mocker.patch('os.kill')
    mocker.patch('subprocess.run', fake_ps)
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.kill_child_processes(4321)
    mocker.patch('os.kill')
    mocker.patch('subprocess.run')

    # Test trying to kill a dead process
    mocker.patch('os.kill', fake_kill)
    mocker.patch('subprocess.run', fake_ps)
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.kill_child_processes(1234)
    mocker.patch('os.kill')
    mocker.patch('subprocess.run')


def test_snake_request_on_timeout():
    """ Test SnakeRequest: timeout """
    # pylint: disable=no-member, protected-access
    # Stub that records the args it was called with on the instance.
    def kill_child_processes(self, *args, **kwargs):  # pylint: disable=unused-argument
        self.abcd = args

    def _on_timeout(*args, **kwargs):  # pylint: disable=unused-argument
        return

    SnakeRequest = celery.SnakeRequest
    SnakeRequest.kill_child_processes = kill_child_processes
    SnakeRequest._on_timeout = _on_timeout

    # Test soft, is will set optional arg SIGTERM over SIGKILL
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.on_timeout(soft=True, timeout=1)
    assert len(snake_request.abcd) == 2

    # Test hard
    snake_request = SnakeRequest(mocks.TaskMessage('test'), task='1234')
    snake_request.on_timeout(soft=False, timeout=1)
    assert len(snake_request.abcd) == 1


def test_execute_command(mocker):
    """ Test the execute_command function """
    # NOTE: The setup probably warrant tests in themselves but this is better than nothing ;)
    base_data = schema.CommandSchema().load({'sha256_digest': 'abcd', 'scale': 'abcd', 'command': 'abcd'})

    # In-memory stand-in for the mongo database state.
    class DataBase():  # pylint: disable=too-few-public-methods
        def __init__(self):  # pylint: disable=unused-argument
            self.data = schema.CommandSchema().dump(base_data)
            self.output = ''
    database = DataBase()

    # Fake command collection that just stores the last update.
    class CommandCollection():  # pylint: disable=too-few-public-methods, no-self-use
        def __init__(self, db):  # pylint: disable=unused-argument
            self.db = db

        def update(self, sha256_digest, scale, command, data):  # pylint: disable=unused-argument
            self.db.data = data

    # Fake GridFS-style output collection.
    class CommandOutputCollection():  # pylint: disable=too-few-public-methods, no-self-use
        def __init__(self, db):  # pylint: disable=unused-argument
            self.db = db

        def put(self, file_name, data):  # pylint: disable=unused-argument
            self.db.output = data

    # Fake pymongo client exposing a context-manager `snake` database.
    class MongoClient():  # pylint: disable=too-few-public-methods
        class Snake:
            def __init__(self, db):  # pylint: disable=unused-argument
                self.snake = db

            def dummy(self, *args, **kwargs):  # pylint: disable=unused-argument
                return self.snake
            __enter__ = dummy
            __exit__ = dummy

        def __init__(self, db):  # pylint: disable=unused-argument
            self.snake = self.Snake(database)

    # ScaleManager doubles, each raising a different failure class.
    class ScaleManagerCW():  # pylint: disable=too-few-public-methods
        def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
            raise error.CommandWarning('error')

    class ScaleManagerSE():  # pylint: disable=too-few-public-methods
        def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
            raise error.SnakeError('error')

    class ScaleManagerTE():  # pylint: disable=too-few-public-methods
        def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
            raise BrokenPipeError('error')

    class ScaleManagerE():  # pylint: disable=too-few-public-methods
        def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
            raise Exception('error')

    # Naive json.dumps replacement (str repr with quotes swapped).
    def dumps(data):
        try:
            return str(data).replace("\'", '\"')
        except Exception as err:
            return '{"dummy": "%s"}' % err

    mocker.patch('json.dumps', dumps)
    mocker.patch('pymongo.MongoClient', MongoClient)
    mocker.patch('snake.core.scale_manager.ScaleManager')
    mocker.patch('snake.engines.mongo.command.CommandCollection', CommandCollection)
    mocker.patch('snake.engines.mongo.command.CommandOutputCollection', CommandOutputCollection)

    # Test success
    data = schema.CommandSchema().dump(base_data)
    celery.execute_command(data)
    assert database.data['status'] == 'success'

    # Cause command warning
    mocker.patch('snake.core.scale_manager.ScaleManager', ScaleManagerCW)
    data = schema.CommandSchema().dump(base_data)
    celery.execute_command(data)
    assert database.data['status'] == 'failed'
    output = database.output
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    assert 'error' in json.loads(output)

    # Cause snake error
    mocker.patch('snake.core.scale_manager.ScaleManager', ScaleManagerSE)
    data = schema.CommandSchema().dump(base_data)
    celery.execute_command(data)
    assert database.data['status'] == 'failed'
    output = database.output
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    assert 'error' in json.loads(output)

    # Cause timeout error
    mocker.patch('snake.core.scale_manager.ScaleManager', ScaleManagerTE)
    data = schema.CommandSchema().dump(base_data)
    celery.execute_command(data)
    assert database.data['status'] == 'failed'
    output = database.output
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    assert 'error' in json.loads(output)

    # Cause general error
    mocker.patch('snake.core.scale_manager.ScaleManager', ScaleManagerE)
    data = schema.CommandSchema().dump(base_data)
    celery.execute_command(data)
    assert database.data['status'] == 'failed'
    output = database.output
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    assert 'error' in json.loads(output)


@pytest.mark.asyncio
async def test_wait_for_task(mocker):
    """ Test wait_for_task function """
    class Task():  # pylint: disable=too-few-public-methods
        result = 1
        set_ready = False

        def ready(self):
            return self.set_ready
    task = Task()

    # Patched asyncio.sleep flips the task to ready so the wait loop exits.
    async def sleep(time):  # pylint: disable=unused-argument
        task.set_ready = True
    mocker.patch('asyncio.sleep', sleep)

    # Test ready task
    task.set_ready = True
    await celery.wait_for_task(task)

    # Test non ready task
    task.set_ready = False
    await celery.wait_for_task(task)
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,707
TheoKlein/snake-core
refs/heads/master
/snake/worker.py
"""The worker module. This is worker used by celery. They form the snake pit! Example: celery worker --app snake.worker # Spin a single snake worker Attributes: app (:obj:`Celery`): The celery object used by celery. """ from celery import bootsteps from celery.bin import Option from snake.config import config_parser from snake.config import snake_config from snake.core.celery import celery class CustomArgs(bootsteps.Step): """Custom arguments for celery. This allows for a custom configuration file to be passed throught the command line. Mainly used for testing. """ def __init__(self, worker, worker_config, **options): # pylint: disable=super-init-not-called, unused-argument if worker_config: # NOTE: While the core will have the original settings, as the worker # is in effect standalone this should not result in any configuration # clashing! config_parser.load_config(worker_config[0]) worker.app.conf.update(**snake_config) app = celery # pylint: disable=invalid-name app.user_options['worker'].add( Option('--worker_config', dest='worker_config', default=None, help='Custom worker configuration') ) app.steps['worker'].add(CustomArgs)
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,708
TheoKlein/snake-core
refs/heads/master
/snake/routes/note.py
""" The note route module.

Attributes:
    NoteRoute (tuple): The NoteRoute.
    NotePostRoute (tuple): The NotePostRoute.
    NotesRoute (tuple): The NotesRoute.
"""

from datetime import datetime

from webargs import tornadoparser

from snake import db
from snake import fields
from snake import schema
from snake.core import snake_handler

# pylint: disable=abstract-method
# pylint: disable=arguments-differ


class NoteHandler(snake_handler.SnakeHandler):
    """Extends `SnakeHandler`.

    CRUD operations for a single note addressed by sha256 digest.
    """

    async def get(self, sha256_digest):
        # Fetch one note; 404 if no note exists for the digest.
        document = await db.async_note_collection.select(sha256_digest)
        if not document:
            self.write_warning("note - no sample for given sha256 digest", 404, sha256_digest)
            self.finish()
            return
        # Round-trip through the schema to normalise the stored document.
        document = schema.NoteSchema().dump(schema.NoteSchema().load(document))
        self.jsonify({'note': document})
        self.finish()

    async def delete(self, sha256_digest):
        document = await db.async_note_collection.select(sha256_digest)
        if not document:
            self.write_warning("note - no sample for given sha256 digest", 404, sha256_digest)
            self.finish()
            return
        await db.async_note_collection.delete(sha256_digest)
        self.set_status(200)
        self.jsonify(None)
        self.finish()

    async def patch(self, sha256_digest):
        # Partial update: only the 'body' field is accepted; updated_time is
        # stamped server-side.
        document = await db.async_note_collection.select(sha256_digest)
        if not document:
            self.write_warning("note - no sample for given sha256 digest", 404, sha256_digest)
            self.finish()
            return
        if not self.request.body:
            self.write_warning("note - no request body found", 422)
            self.finish()
            return
        data = self.json_decode(self.request.body)
        data = schema.NoteSchema(only=('body',)).load(data)
        data['updated_time'] = datetime.utcnow()
        data = schema.NoteSchema().dump(data)
        if data.keys():
            await db.async_note_collection.update(sha256_digest, data)
        # Re-read and return the updated document.
        document = await db.async_note_collection.select(sha256_digest)
        document = schema.NoteSchema().dump(schema.NoteSchema().load(document))
        self.jsonify({'note': document})
        self.finish()

    async def put(self, sha256_digest):
        # Full replace of the note body (missing 'body' is treated as empty).
        document = await db.async_note_collection.select(sha256_digest)
        if not document:
            self.write_warning("note - no sample for given sha256 digest", 404, sha256_digest)
            self.finish()
            return
        if not self.request.body:
            self.write_warning("note - no request body found", 422)
            self.finish()
            return
        data = self.json_decode(self.request.body)
        if 'body' not in data.keys():
            data['body'] = ''
        data = schema.NoteSchema(only=('body',)).load(data)
        data['updated_time'] = datetime.utcnow()
        data = schema.NoteSchema().dump(data)
        await db.async_note_collection.update(sha256_digest, data)
        document = await db.async_note_collection.select(sha256_digest)
        document = schema.NoteSchema().dump(schema.NoteSchema().load(document))
        self.jsonify({'note': document})
        self.finish()


class NotePostHandler(snake_handler.SnakeHandler):
    """Extends `SnakeHandler`.

    Creates a single note for an existing file.
    """

    @tornadoparser.use_args(schema.NoteSchema())
    async def post(self, data):
        # The referenced file must exist...
        document = await db.async_file_collection.select(data['sha256_digest'])
        if not document:
            self.write_warning("note - no sample for given data", 404, data)
            self.finish()
            return
        # ...and must not already have a note (409 otherwise).
        document = await db.async_note_collection.select(data['sha256_digest'])
        if document:
            document = schema.NoteSchema().dump(schema.NoteSchema().load(document))
            self.write_warning("note - note already exists for given data", 409, {'note': document})
            self.finish()
            return
        data['timestamp'] = datetime.utcnow()
        data = schema.NoteSchema().dump(data)
        await db.async_note_collection.insert(data)
        document = await db.async_note_collection.select(data['sha256_digest'])
        document = schema.NoteSchema().dump(schema.NoteSchema().load(document))
        self.jsonify({'note': document})
        self.finish()


class NotesHandler(snake_handler.SnakeHandler):
    """Extends `SnakeHandler`.

    List and bulk-create operations over notes.
    """

    @tornadoparser.use_args({
        # Optional filter: restrict listing to a single digest.
        'sha256_digest': fields.Str(required=False),
    })
    async def get(self, data):
        documents = []
        if 'sha256_digest' in data.keys():
            cursor = db.async_note_collection.select_many(data['sha256_digest'])
            while await cursor.fetch_next:
                documents += [cursor.next_object()]
        else:
            cursor = db.async_note_collection.select_all()
            while await cursor.fetch_next:
                documents += [cursor.next_object()]
        documents = schema.NoteSchema(many=True).dump(schema.NoteSchema(many=True).load(documents))
        self.jsonify({'notes': documents})
        self.finish()

    @tornadoparser.use_args(schema.NoteSchema(many=True))
    async def post(self, data):
        if data == []:
            self.write_warning("note - no request body found", 422)
            self.finish()
            return
        # Check that there is a file for each hash
        missing = []
        for i in data:
            document = await db.async_file_collection.select(i['sha256_digest'])
            if not document:
                missing += [i]
        if missing:
            self.write_warning("note - no sample for given data", 404, missing)
            self.finish()
            return
        # Check that there is a note for each hash
        exists = []
        for i in data:
            document = await db.async_note_collection.select(i['sha256_digest'])
            if document:
                exists += [schema.NoteSchema().dump(schema.NoteSchema().load(document))]
        if exists:
            self.write_warning("note - note already exists for given data", 409, exists)
            self.finish()
            return
        # All clear: insert every note with a shared timestamp and echo back
        # the stored documents.
        documents = []
        timestamp = datetime.utcnow()
        for i in data:
            i['timestamp'] = timestamp
            i = schema.NoteSchema().dump(i)
            await db.async_note_collection.insert(i)
            documents += [await db.async_note_collection.select(i['sha256_digest'])]
        documents = schema.NoteSchema(many=True).dump(schema.NoteSchema(many=True).load(documents))
        self.jsonify({'notes': documents})
        self.finish()


NoteRoute = (r"/note/(?P<sha256_digest>[a-zA-Z0-9]+)?", NoteHandler)  # pylint: disable=invalid-name
NotePostRoute = (r"/note", NotePostHandler)  # pylint: disable=invalid-name
NotesRoute = (r"/notes", NotesHandler)  # pylint: disable=invalid-name
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,709
TheoKlein/snake-core
refs/heads/master
/snake/routes/scale.py
""" The file route module. Attributes: ScaleRoute (tuple): The ScaleRoute. ScaleCommandsRoute (tuple): The ScaleCommandsRoute. ScaleInterfaceRoute (tuple): The ScaleInterfaceRoute. ScaleUploadRoute (tuple): The ScaleUploadRoute. ScalesRoute (tuple): The ScalesRoute. """ import asyncio import hashlib import tempfile from os import path from datetime import datetime from marshmallow import exceptions from tornado import escape from webargs import tornadoparser from snake import db from snake import enums from snake import error from snake import fields from snake import schema from snake.config import snake_config from snake.core import route_support from snake.core import snake_handler from snake.managers import scale_manager # pylint: disable=abstract-method # pylint: disable=arguments-differ class ScaleHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" async def get(self, scale): reload = self.get_argument('reload', "false") if reload.lower() == "true": # Ignore all other values scale_manager.reload_scales() try: _scale = scale_manager.get_scale(scale) except error.SnakeError as err: self.write_warning("scale - %s" % err, err.status_code, scale) self.finish() return self.jsonify({"scale": _scale.info()}) self.finish() class ScaleCommandsHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" async def get(self, scale): try: _scale = scale_manager.get_scale(scale) commands = scale_manager.get_component(_scale, enums.ScaleComponent.COMMANDS) except error.SnakeError as err: self.write_warning("scale - %s" % err, err.status_code, scale) self.finish() return self.jsonify({"commands": commands.snake.info()}) self.finish() class ScaleInterfaceHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" class InterfaceSchema(schema.Schema): """Extends `Schema`. Create schema for interface requests. 
""" args = fields.Dict(required=False, default={}, missing={}) command = fields.Str(required=True) format = fields.Str(type=enums.Format, missing=enums.Format.JSON) sha256_digest = fields.Str(required=True) type = fields.Str(type=enums.InterfaceType, missing=enums.InterfaceType.PULL) async def get(self, scale): _scale = scale_manager.get_scale(scale) interface = scale_manager.get_component(_scale, enums.ScaleComponent.INTERFACE) self.jsonify({"interface": interface.snake.info()}) self.finish() async def post(self, scale): if not self.request.body: self.write_warning("scale/interface - no request body found", 422, scale) self.finish() return data = escape.json_decode(self.request.body) try: data = self.InterfaceSchema().dump(self.InterfaceSchema().load(data)) except exceptions.ValidationError as err: self.write_warning(self.json_decode(('%s' % err.messages).replace("'", '"')), 422) self.finish() return document = await db.async_file_collection.select(data['sha256_digest']) if not document: self.write_warning("scale/interface - no sample for given data", 404, data) self.finish() return # Get the push/pull and args _scale = scale_manager.get_scale(scale) interface = scale_manager.get_component(_scale, enums.ScaleComponent.INTERFACE) command = scale_manager.get_interface_command(interface, data['type'], data['command']) data['timestamp'] = datetime.utcnow() # Execute command # TODO: Handle status don't always chuck errors... 
try: loop = asyncio.get_event_loop() output = await loop.run_in_executor(None, command, data['args'], data['sha256_digest']) # output = p(data['args'], data['sha256_digest']) except exceptions.ValidationError as err: self.write_warning(self.json_decode(('{"args": %s}' % err.messages).replace("'", '"')), 422) self.finish() return except error.SnakeError as err: self.write_warning("%s" % err, err.status_code, data) self.finish() return # Run formating data['output'] = interface.snake.format(data['format'], data['command'], output) self.jsonify({"interface": data}) self.finish() class ScaleUploadHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" class UploadSchema(schema.FileSchema): """Extends `FileSchema`.""" args = fields.Dict(required=False, default={}, missing={}) extract = fields.Bool(missing=False) name = fields.Str(missing=None) # Override name password = fields.Str(missing=None) async def get(self, scale): scale_ = scale_manager.get_scale(scale) upload = scale_manager.get_component(scale_, enums.ScaleComponent.UPLOAD) self.jsonify({"upload": upload.snake.info()}) self.finish() async def post(self, scale): # pylint: disable=too-many-locals if not self.request.body: self.write_warning("scale/upload - no request body found", 422, scale) self.finish() return data = escape.json_decode(self.request.body) # Validate args try: data = self.UploadSchema().dump(self.UploadSchema().load(data)) except exceptions.ValidationError as err: self.write_warning(self.json_decode(('%s' % err.messages).replace("'", '"')), 422) self.finish() return scale_ = scale_manager.get_scale(scale) upload = scale_manager.get_component(scale_, enums.ScaleComponent.UPLOAD) # Validate arguments and update upld_args = upload.arguments() try: if upld_args: data['args'] = schema.Schema(fields=upld_args).load(data['args']) except exceptions.ValidationError as err: self.write_warning(self.json_decode(('{"args": %s}' % err.messages).replace("'", '"')), 422) self.finish() return # Get 
the file with tempfile.TemporaryDirectory(dir=path.abspath(path.expanduser(snake_config['cache_dir']))) as temp_dir: loop = asyncio.get_event_loop() f_name = await loop.run_in_executor(None, upload.upload, data['args'], temp_dir) f_path = path.join(temp_dir, f_name) # Extract if required, zip only if data['extract']: f_path = await route_support.unzip_file(f_path, data['password']) f_name = path.basename(f_path) # Update name if not overriden if not data['name']: data['name'] = f_name # Set submission type data['submission_type'] = 'upload:{}'.format(scale) # Check that the file is not empty if path.getsize(f_path) == 0: self.write_warning("scale/upload - sample is empty", 422) self.finish() return # Hash the file sha2 = hashlib.sha256() with open(f_path, 'rb') as f: chunk = f.read(4096) while chunk: sha2.update(chunk) chunk = f.read(4096) sha256_digest = sha2.hexdigest() # Check if the file already exists document = await db.async_file_collection.select(sha256_digest) if document: document = schema.FileSchema().dump(schema.FileSchema().load(document)) self.write_warning("scale/upload - sample already exists for given sha256 digest", 409, {'sample': document}) self.finish() return # Save the file and add it to the database document = await route_support.store_file(sha256_digest, f_path, data['file_type'], data) document = schema.FileSchema().dump(schema.FileSchema().load(document)) self.jsonify({'sample': document}) self.finish() class ScalesHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" @tornadoparser.use_args({ 'file_type': fields.Enum(type=enums.FileType, missing=None, required=None), 'reload': fields.Boolean(missing=False, required=False), }) async def get(self, data): if data['reload']: scale_manager.reload_scales() _scale = scale_manager.get_scales(file_type=data['file_type']) self.jsonify({"scales": _scale}) self.finish() ScaleRoute = (r"/scale/(?P<scale>[^\/]+)?", ScaleHandler) # pylint: disable=invalid-name ScaleCommandsRoute = 
(r"/scale/(?P<scale>[^\/]+)?/commands", ScaleCommandsHandler) # pylint: disable=invalid-name ScaleInterfaceRoute = (r"/scale/(?P<scale>[^\/]+)?/interface", ScaleInterfaceHandler) # pylint: disable=invalid-name ScaleUploadRoute = (r"/scale/(?P<scale>[^\/]+)?/upload", ScaleUploadHandler) # pylint: disable=invalid-name ScalesRoute = (r"/scales", ScalesHandler) # pylint: disable=invalid-name
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,710
TheoKlein/snake-core
refs/heads/master
/snake/routes/command.py
""" The command route module. Attributes: CommandRoute (tuple): The CommandRoute. CommandsRoute (tuple): The CommandsRoute. """ import copy import json from marshmallow import exceptions from webargs import tornadoparser from snake import db from snake import enums from snake import fields from snake import schema from snake.core import route_support from snake.core import snake_handler from snake.error import ScaleError, SnakeError from snake.managers import scale_manager # pylint: disable=abstract-method # pylint: disable=arguments-differ def validate_args(cmd, args): """Validate arguments. Validates the request provided arguments against that expected by the command. Args: cmd (func): The command function. args (dict): The args to validate. """ cmd_args = cmd.cmd_opts.args if cmd_args: try: s = schema.Schema(fields=copy.deepcopy(cmd_args)) return True, s.load(args) except exceptions.ValidationError as err: return False, {'args': err.messages} return True, args class CommandHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" @tornadoparser.use_args({ # 'args': fields.Dict(required=False, default={}, missing={}), 'command': fields.Str(required=True), 'format': fields.Str(type=enums.Format, missing=enums.Format.JSON), 'output': fields.Bool(required=False, default=True, missing=True), 'scale': fields.Str(required=True), 'sha256_digest': fields.Str(required=True) }) async def get(self, data): # NOTE: Tornado/Marshmallow does not like Dict in args, will have to parse manually # TODO: Use marshmallow validation if 'args' in self.request.arguments and self.request.arguments['args']: data['args'] = json.loads(self.request.arguments['args'][0]) else: data['args'] = {} document = await db.async_command_collection.select(data['sha256_digest'], data['scale'], data['command'], data['args']) if not document: self.write_warning("no output for given data", 404, data) self.finish() return if document['status'] == enums.Status.ERROR: self.write_warning("%s" % 
document['output'], 404, data) self.finish() return document = schema.CommandSchema().load(document) output = None if document['_output_id']: output = await db.async_command_output_collection.get(document['_output_id']) try: scale = scale_manager.get_scale(data['scale']) commands = scale_manager.get_component(scale, enums.ScaleComponent.COMMANDS) if data['output']: document['output'] = commands.snake.format(data['format'], document['command'], output) document['format'] = data['format'] except (SnakeError, TypeError) as err: self.write_warning("%s" % err, 404, data) self.finish() return document = schema.CommandSchema().dump(document) self.jsonify({'command': document}) self.finish() @tornadoparser.use_args({ 'args': fields.Dict(required=False, default={}, missing={}), 'asynchronous': fields.Bool(required=False), 'command': fields.Str(required=True), 'format': fields.Str(type=enums.Format, missing=enums.Format.JSON), 'scale': fields.Str(required=True), 'sha256_digest': fields.Str(required=True), 'timeout': fields.Int(required=False) }) async def post(self, data): # Check that there is a file for this hash document = await db.async_file_collection.select(data['sha256_digest']) if not document: self.write_warning("no sample for given data", 404, data) self.finish() return # Check scale support try: scale = scale_manager.get_scale(data['scale'], document['file_type']) commands = scale_manager.get_component(scale, enums.ScaleComponent.COMMANDS) cmd = commands.snake.command(data['command']) except SnakeError as err: self.write_warning("%s" % err, 404, data) self.finish() return # Validate arguments as to not waste users time, yes this is also done on execution result, args = validate_args(cmd, data['args']) if not result: self.write_warning(args, 422, data) self.finish() return data['args'] = args # Queue command try: document = await route_support.queue_command(data) except SnakeError as err: self.write_warning("%s" % err, 500, data) self.finish() return document = 
schema.CommandSchema().load(document) output = None if document['_output_id']: output = await db.async_command_output_collection.get(document['_output_id']) try: document['output'] = commands.snake.format(data['format'], document['command'], output) document['format'] = data['format'] except SnakeError as err: self.write_warning("%s" % err, 404, data) self.finish() return # Dump and finish document = schema.CommandSchema().dump(document) self.jsonify({"command": document}) self.finish() class CommandsHandler(snake_handler.SnakeHandler): """Extends `SnakeHandler`.""" # XXX: Have an 'error' field instead of being silent? class GetSchema(schema.Schema): """Extends `Schema`. Defines the valid schema for get request. """ args = fields.Dict(required=False, default={}, missing={}) command = fields.Str(required=False) format = fields.Str(type=enums.Format, missing=enums.Format.JSON) output = fields.Bool(required=False, default=True, missing=True) sha256_digests = fields.List(fields.Str(), required=False) scale = fields.Str(required=False) class CommandsSchema(schema.Schema): """Extends `Schema`. Defines the valid schema for post request. 
""" args = fields.Dict(required=False, default={}, missing={}) command = fields.Str(required=True) format = fields.Str(type=enums.Format, missing=enums.Format.JSON) sha256_digests = fields.List(fields.Str(), required=True) scale = fields.Str(required=True) timeout = fields.Int(required=False) async def _get_documents(self, sha, sca, cmd, args, fmt, otpt): documents = [] cur = db.async_command_collection.select_many(sha256_digest=sha, scale=sca, command=cmd, args=args, sort="timestamp") while await cur.fetch_next: doc = cur.next_object() doc = schema.CommandSchema().load(doc) try: # Ignore output for missing scales and/or commands scale = scale_manager.get_scale(doc['scale']) commands = scale_manager.get_component(scale, enums.ScaleComponent.COMMANDS) except Exception as err: print("%s - %s" % (doc['scale'], err)) # TODO: Output to log continue output = None if '_output_id' in doc and doc['_output_id']: output = await db.async_command_output_collection.get(doc['_output_id']) doc = schema.CommandSchema().dump(doc) try: if otpt: doc['output'] = commands.snake.format(fmt, cmd, output) doc['format'] = fmt except (SnakeError, TypeError) as err: print("%s - %s" % (doc['scale'], err)) # TODO: Output to log continue documents += [doc] return documents @tornadoparser.use_args(GetSchema(many=True)) async def get(self, data): # pylint: disable=too-many-branches # XXX: This whole function is shit # TODO: Should further clean this # TODO: SORT # We accept RESTful syntax and JSON syntax to allow for increased # control. 
As this is a GET and we are RESTful, URI wins over JSON uri_data = {} for arg in self.request.arguments: if arg == 'sha256_digest': uri_data['sha256_digests'] = [self.get_argument(arg)] else: uri_data[arg] = self.get_argument(arg) if uri_data.keys(): uri_data['args'] = self.create_args(self.request.arguments) uri_data = self.GetSchema().load(uri_data) data = [uri_data] documents = [] # Handle no args, and return early if not data: try: cur = db.async_command_collection.select_all(sort="timestamp") while await cur.fetch_next: documents += [cur.next_object()] except SnakeError as err: self.write_warning("commands - %s" % err, 404, data) self.finish() return # XXX: Fails to validate -__- # documents = schema.CommandSchema(many=True).dump(documents) self.jsonify({'commands': documents}) self.finish() return # Otherwise build query try: for i in data: scale = i['scale'] if 'scale' in i.keys() else None cmd = i['command'] if 'command' in i.keys() else None args = i['args'] if len(i['args']) > 0 else None if 'sha256_digests' in i.keys() and i['sha256_digests'] and i['sha256_digests'][0].lower() != 'all': if i['sha256_digests'][0][:4] == 'all:': # Handle file_type restrictions file_type = enums.FileType(i['sha256_digests'][0].lower().split(':')[1]) file_collection = db.async_file_collection.select_many(file_type=file_type) while await file_collection.fetch_next: sha = file_collection.next_object()['sha256_digest'] documents += await self._get_documents(sha, scale, cmd, args, i['format'], i['output']) else: for sha in i['sha256_digests']: documents += await self._get_documents(sha, scale, cmd, args, i['format'], i['output']) else: documents += await self._get_documents(None, scale, cmd, args, i['format'], i['output']) except SnakeError as err: self.write_warning("commands - %s" % err, 404, data) self.finish() return self.jsonify({'commands': documents}) self.finish() # pylint: disable=invalid-name @tornadoparser.use_args(CommandsSchema(many=True)) async def post(self, 
data): # pylint: disable=too-many-locals, too-many-branches, too-many-statements # XXX: Needs a major clean/rework if not data: self.write_warning("commands - no request body found", 422, data) self.finish() return # Find the commands and validate their arguments for d in data: # Find the command try: s = scale_manager.get_scale(d['scale']) c = scale_manager.get_component(s, enums.ScaleComponent.COMMANDS) cmd = c.snake.command(d['command']) except ScaleError as err: self.write_warning(err.message, err.status_code) self.finish() return result, args = validate_args(cmd, d['args']) if not result: self.write_warning(self.json_decode(args.replace("'", '"')), 422, data) self.finish() return d['args'] = args # Validate hashes and validate them against scales missing = [] unsupported = [] for d in data: s = scale_manager.get_scale(d['scale']) for sha in d['sha256_digests']: if sha.lower() == 'all': if not s.supports and not len(s.supports) == len([x for x in enums.FileType]): unsupported += [d] break elif sha.lower()[:4] == 'all:': file_type = sha.lower().split(':')[1] if file_type == 'file': ft = enums.FileType.FILE elif file_type == 'memory': ft = enums.FileType.MEMORY else: ft = None if ft is None or (s.supports and ft not in s.supports): unsupported += [(sha, s.name)] break else: document = await db.async_file_collection.select(sha) if not document: missing += [d] elif s.supports and document['file_type'] not in s.supports: # Check scale support unsupported += [d] if missing: self.write_warning("commands - no sample(s) for given data", 404, missing) self.finish() return if unsupported: self.write_warning("commands - command unsupported for given data", 422, unsupported) self.finish() return # Queue commands documents = [] for d in data: cmd_dict = {} for k, v in d.items(): if k != 'sha256_digests': cmd_dict[k] = v cmd_dict['asynchronous'] = True for sha in d['sha256_digests']: if sha.lower() == 'all': cursor = db.async_file_collection.select_all() while await 
cursor.fetch_next: cmd_dict['sha256_digest'] = cursor.next_object()['sha256_digest'] cmd_d = schema.CommandSchema().load(cmd_dict) documents += [await route_support.queue_command(cmd_d)] break elif sha.lower()[:4] == 'all:': ft = sha.lower().split(':')[1] if ft == 'file': ft = enums.FileType.FILE elif ft == 'memory': ft = enums.FileType.MEMORY cursor = db.async_file_collection.select_many(file_type=ft) while await cursor.fetch_next: cmd_dict['sha256_digest'] = cursor.next_object()['sha256_digest'] cmd_d = schema.CommandSchema().load(cmd_dict) documents += [await route_support.queue_command(cmd_d)] break else: cmd_dict['sha256_digest'] = sha cmd_d = schema.CommandSchema().load(cmd_dict) documents += [await route_support.queue_command(cmd_d)] # Dump and finish documents = schema.CommandSchema(many=True).load(documents) documents = schema.CommandSchema(many=True).dump(documents) self.jsonify({"commands": documents}) self.finish() CommandRoute = (r"/command", CommandHandler) # pylint: disable=invalid-name CommandsRoute = (r"/commands", CommandsHandler) # pylint: disable=invalid-name
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,711
TheoKlein/snake-core
refs/heads/master
/snake/scales/url/upload.py
# pylint: disable=missing-docstring

from os import path
from urllib import parse
import cgi

import requests

from snake import config
from snake import error
from snake import fields
from snake import scale

# Proxy settings come from the global snake configuration when present
PROXIES = {}
if config.snake_config['http_proxy']:
    PROXIES['http'] = config.snake_config['http_proxy']
if config.snake_config['https_proxy']:
    PROXIES['https'] = config.snake_config['https_proxy']

HEADERS = {
    "Accept-Encoding": "gzip, deflate",
    "User-Agent": config.constants.USER_AGENT
}


class Upload(scale.Upload):
    def arguments(self):
        """Declare the upload's required arguments: a single 'url' string."""
        return {
            'url': fields.Str(required=True)
        }

    def info(self):
        """One-line description of this upload component."""
        return "fetches files from arbitrary URLs and uploads them to Snake"

    def upload(self, args, working_dir):
        """Download the file at ``args['url']`` into ``working_dir``.

        The file name is taken from the Content-Disposition header when
        present, otherwise from the last path segment of the URL, and is
        sanitised so it cannot escape ``working_dir``.

        Args:
            args (dict): Validated arguments; must contain 'url'.
            working_dir (str): Directory to write the downloaded file into.

        Returns:
            str: The name of the file written into ``working_dir``.

        Raises:
            error.UploadError: If the HTTP response is not 200 OK.
        """
        url_parser = parse.urlparse(args['url'])
        if not url_parser.scheme:
            # Default to plain http when no scheme was supplied
            url_parser = parse.urlparse('http://' + args['url'])
        req = requests.get(url_parser.geturl(), headers=HEADERS, proxies=PROXIES, stream=True, timeout=300)
        if not req.status_code == requests.codes.ok:  # pylint: disable=no-member
            raise error.UploadError('HTTP Error: %s - %s' % (req.status_code, req.reason))
        name = None
        if 'Content-Disposition' in req.headers:
            _, params = cgi.parse_header(req.headers['Content-Disposition'])
            if 'filename' in params:
                name = params['filename']
        if not name:
            name = args['url'].split('/')[-1]
        # SECURITY: the name is attacker-controlled (response header or URL);
        # strip any directory components so the write cannot escape working_dir
        name = path.basename(name.replace('\\', '/'))
        if not name:
            # e.g. the URL ended with '/' or the header held only a directory
            name = 'download'
        with open(path.join(working_dir, name), 'wb') as f:
            # Stream in chunks so large downloads are not held in memory
            for chunk in req.iter_content(chunk_size=4096):
                if chunk:
                    f.write(chunk)
        return name
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,712
TheoKlein/snake-core
refs/heads/master
/snake/engines/mongo/note.py
"""The Mongo Note Collection Module. This module provides everything required to communicate with the Mongo NoteCollection. """ class NoteCollection(): """Synchronous Note Collection. Attributes: db (obj): The database object """ def __init__(self, db): self.db = db def delete(self, sha256_digest): """Delete note. Args: sha256_digest (str): The hash of the file. """ return self.db.notes.delete_many({"sha256_digest": sha256_digest}) def insert(self, document): """Insert note. Args: document (:obj:CommandSchema): The note to insert. Returns: :obj:`CommandSchema`: The inserted note. """ return self.db.notes.insert_one(document) def select(self, sha256_digest): """Select note. Args: sha256_digest (str): The hash of the file. Returns: :obj:`CommandSchema`: The selected note. """ return self.db.notes.find_one({"sha256_digest": sha256_digest}) def select_many(self, sha256_digest): """Select notes. Args: sha256_digest (str): The hash of the file. Returns: :obj:`Cursor`: The mongodb cursor. """ return self.db.notes.find({"sha256_digest": sha256_digest}) def select_all(self, filter_=None): """Select all notes. Args: filter_ (dict): The filter. Defaults to None. Returns: :obj:`Cursor`: The mongodb cursor. """ if filter_: return self.db.notes.find(filter_) return self.db.notes.find() def update(self, sha256_digest, data): """Update note. Args: sha256_digest (str): The hash of the file. data (:obj:`CommandSchema): The update data. Returns: :obj:`CommandSchema`: The updated note. """ return self.db.notes.update_one({"sha256_digest": sha256_digest}, {'$set': data}) class AsyncNoteCollection(): """Asynchronous Note Collection. Attributes: db (obj): The database object """ def __init__(self, db): self.db = db def delete(self, sha256_digest, callback=None): """Delete note. Args: sha256_digest (str): The hash of the file. callback (func, optional): The callback function. Defaults to None. 
""" future = self.db.notes.delete_many({"sha256_digest": sha256_digest}) if callback: future.add_done_callback(callback) return future def insert(self, document, callback=None): """Insert note. Args: document (:obj:CommandSchema): The note to insert. callback (func, optional): The callback function. Defaults to None. Returns: :obj:`CommandSchema`: The inserted note. """ future = self.db.notes.insert_one(document) if callback: future.add_done_callback(callback) return future def select(self, sha256_digest, callback=None): """Select note. Args: sha256_digest (str): The hash of the file. callback (func, optional): The callback function. Defaults to None. Returns: :obj:`CommandSchema`: The selected note. """ future = self.db.notes.find_one({"sha256_digest": sha256_digest}) if callback: future.add_done_callback(callback) return future def select_many(self, sha256_digest): """Select notes. Args: sha256_digest (str): The hash of the file. Returns: :obj:`Cursor`: The mongodb cursor. """ return self.db.notes.find({"sha256_digest": sha256_digest}) def select_all(self, filter_=None): """Select all notes. Args: filter_ (dict): The filter. Defaults to None. Returns: :obj:`Cursor`: The mongodb cursor. """ if filter_: return self.db.notes.find(filter_) return self.db.notes.find() def update(self, sha256_digest, data, callback=None): """Update note. Args: sha256_digest (str): The hash of the file. data (:obj:`CommandSchema): The update data. callback (func, optional): The callback function. Defaults to None. Returns: :obj:`CommandSchema`: The updated note. """ future = self.db.notes.update_one({"sha256_digest": sha256_digest}, {'$set': data}) if callback: future.add_done_callback(callback) return future
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,713
TheoKlein/snake-core
refs/heads/master
/snake/fields.py
"""The fields module. This replicates and extends the marshmallow fields module. NOTEs: * We are bringing fields into our namespace. TODOs: * Extend all fields with a valid arguments option. """ # pylint: disable=arguments-differ # pylint: disable=wildcard-import # pylint: disable=unused-wildcard-import import marshmallow.exceptions import marshmallow.fields from marshmallow.fields import __all__ # noqa from marshmallow.utils import missing class SnakeField: def __init__(self, *args, **kwargs): # Get our kwargs, handle them, remove them, pass it on # TODO: We should support ranges for number based items which # marshmallow alread supports, we just need to expose in to_dict if 'values' in kwargs: self.__values = kwargs['values'] del kwargs['values'] kwargs['validate'] = self.values_validator else: self.__values = [] super().__init__(*args, **kwargs) def has_default(self): return type(self.default) is not type(missing) @property def values(self): if hasattr(self.__values, '__call__'): return self.__values() else: return self.__values def values_validator(self, value): if value not in self.values: raise marshmallow.exceptions.ValidationError("'%s' must be in '%s'" % (value, self.values)) def to_dict(self): # Resolve Aliases: # URL = Url # Str = String # Bool = Boolean # Int = Integer type_ = type(self).__name__ if type_ is 'Str': type_ = 'string' elif type_ is 'Bool': type_ = 'boolean' elif type_ is 'Int': type_ = 'integer' else: type_.lower() default = self.default if type(self.default) is not type(missing) else None return { 'default': default, 'required': self.required, 'type': type_, 'values': self.values if self.values else None } # This is a bit grim, but we can dynamically extend all Marshmallow field objects for field in __all__: ignore = ['Dict', 'Field'] if field not in ignore: cls = getattr(marshmallow.fields, field) globals()[field] = type(field, (SnakeField,cls,), {}) else: cls = getattr(marshmallow.fields, field) globals()[field] = type(field, (cls,), 
{}) # Fields class Enum(Str): # noqa """The enum field. This adds the `type` fields that is used to set the type of the enum and used for validation. Attributes: enum_type (:obj:`IterableType`): The enum type for the field. """ def __init__(self, *args, **kwargs): if 'type' in kwargs: enum_type = kwargs.pop('type') else: enum_type = args[:-1] # super().__init__(self, *args, **kwargs) # FIXME: Causes a recursion Error Str.__init__(self, *args, **kwargs) # noqa self.enum_type = enum_type if not self.validators: self.validators = [self.validate_type] def validate_type(self, value): """The validation method. This checks that the value is indeed in the enum and therefore checks validity. """ if value in self.enum_type: return True return False class ObjectId(Str): # noqa """The object id field. This is used to handle Mongo's object id field. """ def _deserialize(self, val, attr, data): return str(val)
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,714
TheoKlein/snake-core
refs/heads/master
/snake/error.py
"""The errors module. All of the custom exceptions used within snake. """ class SnakeError(Exception): """The base error class for snake. This contains a message, an HTTP status code, and a payload (additional data). Attributes: message (str): The error message. status_code (int): The HTTP status code. payload (:obj:): Any additional data. """ def __init__(self, message, status_code=None, payload=None): super().__init__(message) self.message = message self.status_code = status_code self.payload = payload # 200 # TODO: Make 200 class CommandWarning(SnakeError): """The command warning exception. This should be used when a warning needs to be reported from within the scale's command component. """ def __init__(self, message): SnakeError.__init__(self, message, 500) class InterfaceWarning(SnakeError): """The interface warning exception. This should be used when a warning needs to be reported from within the scale's interface component. """ def __init__(self, message): SnakeError.__init__(self, message, 500) # 500 class CommandError(SnakeError): """The command error exception. This should be used when an error needs to be reported from within the scale's command component. """ def __init__(self, message): SnakeError.__init__(self, message, 500) class InterfaceError(SnakeError): """The interface error exception. This should be used when an error needs to be reported from within the scale's interface component. """ def __init__(self, message): SnakeError.__init__(self, message, 500) class ScaleError(SnakeError): """The scale error exception. This should be used when a generic error needs to be reported from within a scale. """ def __init__(self, message): SnakeError.__init__(self, message, 500) class UploadError(ScaleError): """The upload error exception. This should be used when an error needs to be reported from within the scale's upload component. 
""" def __init__(self, message): ScaleError.__init__(self, message) class MongoError(SnakeError): """The mongo error exception. This should be used when an error needs to be reported that is related to mongo. """ def __init__(self, message): SnakeError.__init__(self, message, 500) class ServerError(SnakeError): """The server error exception. This should be used when an error needs to be reported that is related to server side problems, such as missing files. """ def __init__(self, message): SnakeError.__init__(self, message, 500)
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,715
TheoKlein/snake-core
refs/heads/master
/snake/scales/hashes/__init__.py
# pylint: disable=missing-docstring from snake.config import constants from snake.scale import scale __scale__ = scale( name='hashes', description='a module to calculate hashes on files', version=constants.VERSION, author="Countercept", supports=[ ] )
{"/snake/core/route_support.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/tests/core/test_route_support.py": ["/snake/config/__init__.py"], "/snake/snaked.py": ["/snake/config/__init__.py", "/snake/core/celery.py", "/snake/core/route_manager.py", "/snake/core/snake_handler.py"], "/snake/routes/download.py": ["/snake/db.py", "/snake/utils/__init__.py"], "/snake/db.py": ["/snake/config/__init__.py"], "/snake/core/scale_manager.py": ["/snake/config/__init__.py"], "/tests/test_scale.py": ["/snake/error.py"], "/snake/utils/submitter.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"], "/snake/snake_utility.py": ["/snake/config/__init__.py"], "/snake/utils/file_storage.py": ["/snake/config/__init__.py"], "/snake/config/config.py": ["/snake/config/__init__.py"], "/snake/core/celery.py": ["/snake/config/__init__.py"], "/snake/scales/hashes/commands.py": ["/snake/utils/__init__.py"], "/snake/scales/strings/commands.py": ["/snake/scales/strings/__init__.py"], "/snake/routes/api.py": ["/snake/config/__init__.py"], "/snake/scale.py": ["/snake/enums.py"], "/tests/utils/test_markdown.py": ["/snake/utils/__init__.py"], "/tests/config/test_config.py": ["/snake/config/__init__.py"], "/snake/utils/__init__.py": ["/snake/utils/file_storage.py"], "/snake/core/snake_handler.py": ["/snake/config/__init__.py"], "/snake/worker.py": ["/snake/config/__init__.py", "/snake/core/celery.py"], "/snake/routes/scale.py": ["/snake/config/__init__.py", "/snake/managers.py"], "/snake/routes/command.py": ["/snake/error.py", "/snake/managers.py"], "/snake/scales/hashes/__init__.py": ["/snake/config/__init__.py", "/snake/scale.py"]}
57,716
tdzell/VisionSystem
refs/heads/master
/IDStest.py
from pyueye import ueye
from pyueye_example_camera import Camera
from pyueye_example_utils import FrameThread
import cv2
import numpy as np

# Scratch script exercising the IDS uEye camera pipeline: configure the
# camera for BGR8 capture over a fixed 1280x1024 area of interest, then start
# a frame-pulling thread.
cam = Camera()
cam.init()
cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
cam.set_aoi(0,0, 1280, 1024)
cam.alloc()
cam.capture_video()

# NOTE(review): `view` is never defined anywhere in this script, so this line
# raises NameError at runtime.  The pyueye examples pass a display/view object
# here — confirm the intended second argument before using this script.
thread = FrameThread(cam, view)
thread.start()

# uEye SDK calls of interest for future work:
'''
is_SetBinning
is_SetSubSampling
is_AOI
is_Exposure
is_SetFrameRate
is_EdgeEnhancement
is_Measure (blurriness detection)
is_CaptureVideo
is_ColorTemperature
'''
{"/utils.py": ["/AlarmDetector.py"], "/liveVideoDetect.py": ["/utils.py", "/AlarmDetector.py"]}
57,717
tdzell/VisionSystem
refs/heads/master
/data/BoltPictures/Training_Images/nameconvert.py
"""Strip the fixed part-number prefix from every JPEG filename in this folder."""
import os
import glob

_PREFIX = '276853_D2768 '

for original_name in glob.glob('*.jpg'):
    # Renaming is unconditional: files without the prefix are renamed to
    # their own (unchanged) name, which is a no-op.
    os.rename(original_name, original_name.replace(_PREFIX, ""))
{"/utils.py": ["/AlarmDetector.py"], "/liveVideoDetect.py": ["/utils.py", "/AlarmDetector.py"]}
57,718
tdzell/VisionSystem
refs/heads/master
/AlarmDetector.py
# Python modules
import sharing


def GlobeCreate():
    """Initialize all module-level detection counters to zero.

    Must be called once before AlarmDetect so the `global` counters exist.
    """
    # NOTE(review): boltCount/outerCount/handleCount below are assigned as
    # locals (no `global` statement) so these two assignments have no effect
    # outside this function.
    boltCount = 0
    global oneBoltSeen
    oneBoltSeen = 0
    global twoBoltSeen
    twoBoltSeen = 0
    global threeBoltSeen
    threeBoltSeen = 0
    global fourBoltSeen
    fourBoltSeen = 0
    outerCount = 0
    global oneOuterSeen
    oneOuterSeen = 0
    global twoOuterSeen
    twoOuterSeen = 0
    handleCount = 0
    global handleSeen
    handleSeen = 0
    global noBoltSeen
    noBoltSeen = 0
    global counterimage
    counterimage = 0


def AlarmDetect(DetectedClasses, ClassNames, imgToBeSaved):
    """Accumulate per-frame detections and raise a verdict once bolts vanish.

    Args:
        DetectedClasses: list of class ids detected in the current frame.
        ClassNames: list of all class ids/names (used only to size counters).
        imgToBeSaved: the raw frame to stash for later saving as evidence.

    Returns:
        (colorframe, saveimage) tuple.  NOTE(review): these return values are
        vestigial — `colorframe` is always 'nothing' and `saveimage` always
        False here; the actual verdict is communicated through
        `sharing.colorframe` / `sharing.saveimage`, which the caller
        (utils.plot_boxes_cv2) reads instead.  Confirm before relying on the
        return tuple.
    """
    # Declare the cross-frame counters as globals rather than new locals.
    global counterimage
    boltCount = 0
    global oneBoltSeen    # "Seen" counters: how often each detection count occurred recently
    global twoBoltSeen
    global threeBoltSeen
    global fourBoltSeen
    outerCount = 0
    global oneOuterSeen
    global twoOuterSeen
    handleCount = 0
    global handleSeen
    global noBoltSeen
    global blurredimg     # NOTE(review): declared but never read or written here
    Counter = [[],[]]     # parallel lists: [class names, per-class hit counts]
    saveimage = False
    for Name in ClassNames:
        # Initialize one zeroed slot per known class.
        Counter[0].append(Name)
        Counter[1].append(0)
    for DetectedName in DetectedClasses:
        # Tally how many times each class id appears in this frame.
        Counter[1][DetectedName] = Counter[1][DetectedName] + 1
    # Pull the counts for the classes of interest into readable names
    # (class 0 = bolt, class 1 = handle, class 2 = outer sign).
    boltCount = Counter[1][0]
    outerCount = Counter[1][2]
    handleCount = Counter[1][1]
    if boltCount == 0:
        # No bolts this frame: advance the "quiet" counter that eventually
        # triggers a judgement.
        noBoltSeen += 1
    if boltCount == 1:
        oneBoltSeen += 1
        noBoltSeen = 0  # any bolt sighting resets the quiet counter
    if boltCount == 2:
        twoBoltSeen += 1
        noBoltSeen = 0
    if boltCount == 3:
        threeBoltSeen += 1
        noBoltSeen = 0
    if boltCount == 4:
        fourBoltSeen += 1
        noBoltSeen = 0
    if boltCount > 4:
        # For the current application, no more than 4 bolts should ever be visible.
        print('ERROR: MORE THAN 4 BOLTS SEEN')
    if outerCount == 1:
        oneOuterSeen += 1
        noBoltSeen = 0
    elif outerCount == 2:
        twoOuterSeen += 1
        noBoltSeen = 0
    if outerCount > 2:
        print('ERROR: MORE THAN 2 OUTER SEEN')
    if handleCount == 1:
        handleSeen += 1
        noBoltSeen = 0
    if handleCount > 1:
        print('ERROR: MORE THAN 1 HANDLE SEEN')
    print('no bolt: %s' % (noBoltSeen))
    colorframe = 'nothing'
    if (oneBoltSeen + twoBoltSeen + threeBoltSeen + fourBoltSeen) == 1:
        # First bolt sighting of this episode: stash the frame so it can be
        # saved permanently if a verdict is reached later.
        sharing.holdimg = imgToBeSaved
    if noBoltSeen > 8:
        # Bolts have been absent long enough — time to judge the episode.
        boltExpected = 0
        if twoOuterSeen >= sharing.detect_min:
            boltExpected += 2  # two outer signs imply two associated bolts
        elif oneOuterSeen >= sharing.detect_min:
            boltExpected += 1  # one outer sign implies one associated bolt
        if handleSeen >= sharing.detect_min:
            boltExpected += 2  # a handle implies two associated bolts
        # Take the highest simultaneous bolt count that was observed at least
        # `sharing.detect_min` times as the number of bolts actually seen.
        if fourBoltSeen >= sharing.detect_min:
            boltSeen = 4
        elif threeBoltSeen >= sharing.detect_min:
            boltSeen = 3
        elif twoBoltSeen >= sharing.detect_min:
            boltSeen = 2
        elif oneBoltSeen >= sharing.detect_min:
            boltSeen = 1
        else:
            boltSeen = 0
        if boltExpected != 0:
            if boltSeen > boltExpected:
                print('||CONFUSED:, %s bolts seen but %s expected' % (boltSeen, boltExpected))
                sharing.saveimage = True            # tell the caller to persist the held frame
                sharing.savefolder = 'falsepositives'
                sharing.colorframe = 'yellow'       # caller replaces the feed with a yellow frame
            if boltSeen == boltExpected:
                print('VERIFIED, %s bolts expected' % (boltExpected))
                sharing.saveimage = True
                sharing.savefolder = 'verified'
                sharing.colorframe = 'green'        # caller replaces the feed with a green frame
            if boltSeen < boltExpected:
                print('**ALARM**, %s bolts seen but %s expected' % (boltSeen, boltExpected))
                sharing.saveimage = True
                sharing.savefolder = 'falsepositives'
                sharing.colorframe = 'red'          # caller replaces the feed with a red frame
        else:
            sharing.colorframe = 'nothing'
        # Reset every counter in preparation for the next episode.
        noBoltSeen = 0
        oneBoltSeen = 0
        twoBoltSeen = 0
        threeBoltSeen = 0
        fourBoltSeen = 0
        oneOuterSeen = 0
        twoOuterSeen = 0
        handleSeen = 0
    return colorframe, saveimage
{"/utils.py": ["/AlarmDetector.py"], "/liveVideoDetect.py": ["/utils.py", "/AlarmDetector.py"]}
57,719
tdzell/VisionSystem
refs/heads/master
/utils.py
# Python default libraries
import os
from math import exp, ceil, floor
from copy import copy
import time
# Python external libraries
import torch
import numpy as np
from torch.autograd import Variable
import cv2
import struct  # get_image_size
import imghdr  # get_image_size
import sharing
# Python modules
import AlarmDetector


def createglobal():
    """Reset the shared image counter used when saving evidence frames."""
    sharing.counterimage = 0


def sigmoid(x):
    """Logistic sigmoid for a scalar."""
    return 1.0/(exp(-x)+1.)


def softmax(x):
    """Numerically-stable softmax over a tensor."""
    x = torch.exp(x - torch.max(x))
    x = x/x.sum()
    return x


def bbox_iou(box1, box2):
    """Intersection-over-union of two (cx, cy, w, h) boxes (scalars)."""
    mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
    Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
    my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
    My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
    w1 = box1[2]
    h1 = box1[3]
    w2 = box2[2]
    h2 = box2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    if cw <= 0 or ch <= 0:
        # No overlap at all.
        return 0.0
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    uarea = area1 + area2 - carea
    return carea/uarea


def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Vectorised IoU between two tensors of boxes.

    When `x1y1x2y2` is True the boxes are corner coordinates, otherwise
    (cx, cy, w, h).
    """
    if x1y1x2y2:
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
        Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
        my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
        My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    mask = ((cw <= 0) + (ch <= 0) > 0)  # element-wise "no overlap" mask
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea/uarea


def nms(boxes, nms_thresh):
    """Non-maximum suppression over detection boxes.

    Boxes are processed in descending confidence order (box[4]); any box
    overlapping a kept box by more than `nms_thresh` IoU is suppressed.
    """
    if len(boxes) == 0:
        return boxes
    det_confs = torch.zeros(len(boxes))
    for i in range(len(boxes)):
        # Sort ascending on (1 - confidence) == descending on confidence.
        det_confs[i] = 1-boxes[i][4]
    _, sortIds = torch.sort(det_confs)
    out_boxes = []
    for i in range(len(boxes)):
        box_i = boxes[sortIds[i]]
        if box_i[4] > 0:
            out_boxes.append(box_i)
            for j in range(i+1, len(boxes)):
                box_j = boxes[sortIds[j]]
                if bbox_iou(box_i, box_j) > nms_thresh:
                    box_j[4] = 0  # suppress by zeroing its confidence
    return out_boxes


def convert2cpu(gpu_matrix):
    """Copy a (possibly GPU) float tensor to a CPU FloatTensor."""
    return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)


def convert2cpu_long(gpu_matrix):
    """Copy a (possibly GPU) long tensor to a CPU LongTensor."""
    return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)


def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
    """Decode a YOLO region-layer output tensor into a list of boxes per image.

    Each box is [cx, cy, w, h, det_conf, cls_conf, cls_id] in fractions of the
    feature-map size, plus optional (conf, class) pairs when validating.
    """
    anchor_step = len(anchors)//num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)
    all_boxes = []
    # Flatten to (5+num_classes, batch*anchors*h*w) so each prediction channel
    # can be indexed directly.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)
    if sharing.usegpu == True:
        grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
        grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    else:
        grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cpu()
        grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cpu()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y
    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    if sharing.usegpu == True:
        anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
        anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    else:
        anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cpu()
        anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cpu()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h
    det_confs = torch.sigmoid(output[4])
    cls_confs = torch.nn.Softmax()(Variable(output[5:5+num_classes].transpose(0,1))).data
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]
                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    return all_boxes


def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    """Draw detections on `img`, feed them to AlarmDetector and apply its verdict.

    Returns (image, waitsignal): the (possibly colour-flooded) frame and a flag
    telling the caller to pause on the verdict frame.
    """
    imgToBeSaved = copy(img)  # keep an unannotated copy for evidence saving
    saveimage = False
    colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]])

    def get_color(c, x, max_val):
        # Interpolate a per-class colour channel from the palette above.
        ratio = float(x)/max_val * 5
        i = int(floor(ratio))
        j = int(ceil(ratio))
        ratio = ratio - i
        r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
        return int(r*255)

    Detected = []
    width = img.shape[1]
    height = img.shape[0]
    for i in range(len(boxes)):
        box = boxes[i]
        x1 = int(round((box[0] - box[2]/2.0) * width))
        y1 = int(round((box[1] - box[3]/2.0) * height))
        x2 = int(round((box[0] + box[2]/2.0) * width))
        y2 = int(round((box[1] + box[3]/2.0) * height))
        Detected.append(box[6])  # create a list to be passed of all detections
        if color:
            rgb = color
        else:
            rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            if cls_id > 6:
                cls_id = 6
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 1)
    if Detected:
        # Pass the detections to AlarmDetector.py.
        AlarmDetector.AlarmDetect(Detected, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], imgToBeSaved)
    else:
        # AlarmDetector.py still needs to be called even if no detections
        # occured; an irrelevant class detection is passed instead of nothing.
        AlarmDetector.AlarmDetect([19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], imgToBeSaved)
    if sharing.saveimage == True:
        # A verdict was reached: persist the held evidence frame.
        cv2.imwrite('%s/%s.jpg' % (sharing.savefolder, sharing.counterimage), sharing.holdimg)
        sharing.saveimage = False
        sharing.counterimage += 1  # keeps names unique per detection | will overwrite names
    if sharing.colorframe == 'red':
        img = cv2.rectangle(img, (0,0), (1920,1080), (0, 0, 255), thickness = -1)  # flood frame red
        sharing.colorframe = 'nothing'
        waitsignal = True  # signal to pause once the coloured frame is displayed
    elif sharing.colorframe == 'yellow':
        img = cv2.rectangle(img, (0,0), (1920,1080), (0, 255, 255), thickness = -1)  # flood frame yellow
        sharing.colorframe = 'nothing'
        waitsignal = True
    elif sharing.colorframe == 'green':
        img = cv2.rectangle(img, (0,0), (1920,1080), (0, 255, 0), thickness = -1)  # flood frame green
        sharing.colorframe = 'nothing'
        waitsignal = True
    else:
        waitsignal = False
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img, waitsignal


def read_truths(lab_path):
    """Load a label file as an (n, 5) numpy array; empty array if absent/empty."""
    if not os.path.exists(lab_path):
        return np.array([])
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # BUG FIX: was `truths.size/5` — true division yields a float, which
        # numpy's reshape rejects under Python 3.  Integer division is required.
        truths = truths.reshape(truths.size//5, 5)  # to avoid single truth problem
        return truths
    else:
        return np.array([])


def read_truths_args(lab_path, min_box_scale):
    """Load truths, dropping boxes narrower than `min_box_scale`."""
    truths = read_truths(lab_path)
    new_truths = []
    for i in range(truths.shape[0]):
        if truths[i][3] < min_box_scale:
            continue
        new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4]])
    return np.array(new_truths)


def load_class_names(namesfile):
    """Read a class-names file: one name per line, trailing whitespace stripped."""
    class_names = []
    with open(namesfile, 'r') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.rstrip()
        class_names.append(line)
    return class_names


def image2torch(img):
    """Convert a PIL image to a normalised (1, 3, H, W) float tensor."""
    width = img.width
    height = img.height
    img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
    img = img.view(1, 3, height, width)
    img = img.float().div(255.0)
    return img


def do_detect(model, img, conf_thresh, nms_thresh, usecuda):
    """Run the model on one BGR frame and return NMS-filtered region boxes."""
    model.eval()
    img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
    if usecuda:
        img = img.cuda()
        img = torch.autograd.Variable(img)
    else:
        img = torch.autograd.Variable(img).cpu()
    output = model(img)
    output = output.data
    boxes = get_region_boxes(output, conf_thresh, model.num_classes, model.anchors, model.num_anchors)[0]
    boxes = nms(boxes, nms_thresh)
    return boxes


def read_data_cfg(datacfg):
    """Parse a `key = value` data config file into a dict (with defaults)."""
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(datacfg, 'r') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.strip()
        if line == '':
            continue
        key, value = line.split('=')
        key = key.strip()
        value = value.strip()
        options[key] = value
    return options


def scale_bboxes(bboxes, width, height):
    """Return a deep copy of fractional boxes scaled to pixel coordinates."""
    import copy
    dets = copy.deepcopy(bboxes)
    for i in range(len(dets)):
        dets[i][0] = dets[i][0] * width
        dets[i][1] = dets[i][1] * height
        dets[i][2] = dets[i][2] * width
        dets[i][3] = dets[i][3] * height
    return dets


def file_lines(thefilepath):
    """Count newline characters in a file, reading in large binary chunks."""
    count = 0
    with open(thefilepath, 'rb') as thefile:
        while True:
            buffer = thefile.read(8192*1024)
            if not buffer:
                break
            count += buffer.count(b'\n')
    return count


def get_image_size(fname):
    '''Determine the image type of fhandle and return its size. from draco'''
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            return
        if imghdr.what(fname) == 'png':
            check = struct.unpack('>i', head[4:8])[0]
            if check != 0x0d0a1a0a:
                return
            width, height = struct.unpack('>ii', head[16:24])
        elif imghdr.what(fname) == 'gif':
            width, height = struct.unpack('<HH', head[6:10])
        elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
            try:
                fhandle.seek(0)  # Read 0xff next
                size = 2
                ftype = 0
                while not 0xc0 <= ftype <= 0xcf:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xff:
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack('>H', fhandle.read(2))[0] - 2
                # We are at a SOFn block
                fhandle.seek(1, 1)  # Skip `precision' byte.
                height, width = struct.unpack('>HH', fhandle.read(4))
            except Exception:  # IGNORE:W0703
                return
        else:
            return
        return width, height


def logging(message):
    """Print a timestamped log message."""
    print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
{"/utils.py": ["/AlarmDetector.py"], "/liveVideoDetect.py": ["/utils.py", "/AlarmDetector.py"]}
57,720
tdzell/VisionSystem
refs/heads/master
/liveVideoDetect.py
from utils import * from darknet import Darknet import cv2 import AlarmDetector from pyueye import ueye #importing this too early would require IDS camera drivers to be installed just to run the "StandardCamera" code from pyueye_example_camera import Camera from pyueye_example_utils import * import numpy as np import multiprocessing from multiprocessing import Queue, Pool from threading import Thread from ctypes import byref import sharing import time def IDSCamera(cfgfile, weightfile, useGPU): ### IDS camera initializations cam = Camera() cam.init() cam.set_colormode(ueye.IS_CM_BGR8_PACKED) cam.alloc() cam.capture_video() input_q = Queue(8) output_q = Queue(8) ### startup of thread that pulls image frames from the IDS camera thread = FrameThread(cam, 1, cfgfile, weightfile, useGPU, input_q, output_q) thread.start() loop = True m = Darknet(cfgfile) ### initialization for creation of a .avi file for sharing of proof of concept fourcc = cv2.VideoWriter_fourcc(*'DIVX') out = cv2.VideoWriter('output.avi',fourcc,5.0,(480, 360)) if m.num_classes == 20: namesfile = 'data/voc.names' elif m.num_classes == 80: namesfile = 'data/coco.names' else: namesfile = 'data/names' class_names = load_class_names(namesfile) num_workers = 2 pool = Pool(num_workers, IDS_worker, (input_q, output_q, cfgfile, weightfile, useGPU)) while loop: cv2.waitKey(10) image, bboxes = output_q.get() print('------') draw_img, waitsignal = plot_boxes_cv2(image, bboxes, None, class_names) #draw boxes associated with detections onto the base images | AlarmDetection.py is called in here cv2.imshow('cfgfile', draw_img) #show the image frame that now has detections drawn onto it | draw_image will be entirely green/yellow/red after a judgement is made by AlarmDetection.py for verification or alarm '''uncomment the following line to record video | file is named output.avi and will overwrite any existing files with same name''' #out.write(draw_img) if waitsignal == True: cv2.waitKey(2000) waitsignal = False if 
cv2.waitKey(1) & 0xFF == ord('q'): loop = False out.release() cv2.destroyAllWindows() thread.stop() thread.join() print('join') pool.terminate() print('terminate') cam.stop_video() print('stop_video') cam.exit() print('cam exit') break print('IDS_Camera close') def IDS_worker(input_q, output_q, cfgfile, weightfile, useGPU): sharing.detect_min = 3 sharing.colorframe = 'nothing' sharing.saveimage = False sharing.counterimage = 0 if useGPU: sharing.usegpu = True else: sharing.usegpu = False ### initialization of neural network based upon the specified config and weights files m = Darknet(cfgfile) m.print_network() m.load_weights(weightfile) ### if GPU optimizations are enabled, do some initialization if sharing.usegpu: m.cuda() print('Loading weights from %s... Done!' % (weightfile)) while True: image = input_q.get() sized = cv2.resize(image, (m.width, m.height)) #third value in this call sets the confidence threshold for object detection output_q.put((image, do_detect(m, sized, 0.4, 0.4, useGPU))) #out.write(draw_img) def StandardCamera(cfgfile, weightfile, useGPU): m = Darknet(cfgfile) ### initialization for creation of a .avi file for sharing of proof of concept fourcc = cv2.VideoWriter_fourcc(*'DIVX') out = cv2.VideoWriter('output.avi',fourcc,4.0,(640,480)) ### initialization of pulling image frames from generic USB camera using openCV cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) cap.set(cv2.CAP_PROP_AUTOFOCUS, 0) if not cap.isOpened(): print("Unable to open camera") exit(-1) #################### num_workers = 2 input_q = Queue(4) output_q = Queue(4) res, img = cap.read() input_q.put(img) pool = Pool(num_workers, Standard_worker, (input_q, output_q, cfgfile, weightfile, useGPU)) if m.num_classes == 20: namesfile = 'data/voc.names' elif m.num_classes == 80: namesfile = 'data/coco.names' else: namesfile = 'data/names' class_names = load_class_names(namesfile) while True: res, img = cap.read() 
input_q.put(img) img, bboxes = output_q.get() print('------') draw_img, waitsignal = plot_boxes_cv2(img, bboxes, None, class_names) #draw boxes associated with detections onto the base images | AlarmDetection.py is called in here cv2.imshow('cfgfile', draw_img) #show the image frame that now has detections drawn onto it | draw_image will be entirely green/yellow/red after a judgement is made by AlarmDetection.py for verification or alarm '''uncomment the following line to record video | file is named output.avi and will overwrite any existing files with same name''' #out.write(draw_img) if waitsignal == True: #if green/yellow/red screen is being shown by draw_img, leave it in place for two seconds instead of continuing detections cv2.waitKey(2000) waitsignal = False cv2.waitKey(3) #neccessary to ensure this loop does not attempt to pull new images from the USB camera too quickly time.sleep(0.05) # if cv2.waitKey(1) & 0xFF == ord('q'): # break pool.terminate() cap.stop() cv2.destroyAllWindows() ##################### def Standard_worker(input_q, output_q, cfgfile, weightfile, useGPU): sharing.detect_min = 3 sharing.colorframe = 'nothing' sharing.saveimage = False sharing.counterimage = 0 if useGPU: sharing.usegpu = True else: sharing.usegpu = False ### initialization of neural network based upon the specified config and weights files m = Darknet(cfgfile) m.print_network() m.load_weights(weightfile) ### if GPU optimizations are enabled, do some initialization if sharing.usegpu: m.cuda() print('Loading weights from %s... Done!' 
% (weightfile)) while True: img = input_q.get() sized = cv2.resize(img, (m.width, m.height)) #resize the image frame pulled into the size expecteed by the detection model #bboxes = do_detect(m, sized, 0.4, 0.4, useGPU) #third value in this call sets the confidence needed to detect object output_q.put((img, do_detect(m, sized, 0.4, 0.4, useGPU))) ################################################# ########################################################### class FrameThread(Thread): def __init__(self, cam, views, cfgfile, weightfile, useGPU, input_q, output_q, copy=True): super(FrameThread, self).__init__() self.timeout = 1000 self.cam = cam self.running = True self.views = views self.copy = copy self.m = Darknet(cfgfile) self.m.print_network() self.m.load_weights(weightfile) self.useGPU = useGPU sharing.usegpu = useGPU sharing.loop = True self.input_q = input_q self.output_q = output_q if self.m.num_classes == 20: namesfile = 'data/voc.names' elif self.m.num_classes == 80: namesfile = 'data/coco.names' else: namesfile = 'data/names' self.m.class_names = load_class_names(namesfile) if self.useGPU: self.m.cuda() print('Loading weights from %s... Done!' 
% (weightfile)) def run(self): while self.running: img_buffer = ImageBuffer() ret = ueye.is_WaitForNextImage(self.cam.handle(), self.timeout, img_buffer.mem_ptr, img_buffer.mem_id) if ret == ueye.IS_SUCCESS: self.notify(ImageData(self.cam.handle(), img_buffer)) cv2.waitKey(100) time.sleep(0.01) def notify(self, image_data): if self.views: if type(self.views) is not list: self.views = [self.views] for view in self.views: image = image_data.as_1d_image() image_data.unlock() self.input_q.put(image) def stop(self): self.running = False ############################################ if __name__ == '__main__': AlarmDetector.GlobeCreate() #initializes module level global counters for AlarmDetector.py ### initialization of program level global variables for: configuration; saving of "falsepositive" images sharing.detect_min = 3 sharing.colorframe = 'nothing' sharing.saveimage = False sharing.counterimage = 0 ### exactly four arguments must be present after calling this python script in command prompt for the rest of the script to run if len(sys.argv) == 5: cfgfile = sys.argv[1] #pulling the arguments given from command prompt weightfile = sys.argv[2] cpuGPU = sys.argv[3] cameraUsage = sys.argv[4] if cpuGPU == 'GPU': useGPU = True else: useGPU = False if useGPU: sharing.usegpu = True else: sharing.usegpu = False if cameraUsage == 'IDS': #If "IDS" is the final argument given, use the IDS Camera code, otherwise use the generic USB camera code IDSCamera(cfgfile, weightfile, useGPU) else: StandardCamera(cfgfile, weightfile, useGPU) #demo('cfg/tiny-yolo-voc.cfg', 'tiny-yolo-voc.weights') else: print('Usage:') print(' python demo.py cfgfile weightfile') print('') print(' perform detection on camera')
{"/utils.py": ["/AlarmDetector.py"], "/liveVideoDetect.py": ["/utils.py", "/AlarmDetector.py"]}
57,721
farizrahman4u/skorch
refs/heads/master
/skorch/tests/callbacks/test_all.py
import itertools import pytest class TestAllCallbacks: @pytest.fixture def callbacks(self): """Return all callbacks""" import skorch.callbacks callbacks = [] for name in dir(skorch.callbacks): attr = getattr(skorch.callbacks, name) # pylint: disable=unidiomatic-typecheck if not type(attr) is type: continue if issubclass(attr, skorch.callbacks.Callback): callbacks.append(attr) return callbacks @pytest.fixture def base_cls(self): from skorch.callbacks import Callback return Callback @pytest.fixture def on_x_methods(self): return [ 'on_train_begin', 'on_train_end', 'on_epoch_begin', 'on_epoch_end', 'on_batch_begin', 'on_batch_end', 'on_grad_computed', ] def test_on_x_methods_have_kwargs(self, callbacks, on_x_methods): import inspect for callback, method_name in itertools.product( callbacks, on_x_methods): method = getattr(callback, method_name) assert "kwargs" in inspect.signature(method).parameters def test_set_params_with_unknown_key_raises(self, base_cls): with pytest.raises(ValueError) as exc: base_cls().set_params(foo=123) # TODO: check error message more precisely, depending on what # the intended message shouldb e from sklearn side assert exc.value.args[0].startswith('Invalid parameter foo for')
{"/skorch/tests/test_regressor.py": ["/skorch/exceptions.py"], "/skorch/tests/test_history.py": ["/skorch/history.py"]}
57,722
farizrahman4u/skorch
refs/heads/master
/skorch/history.py
"""Contains history class and helper functions.""" import json from skorch.utils import open_file_like # pylint: disable=invalid-name class _none: """Special placeholder since ``None`` is a valid value.""" def _not_none(items): """Whether the item is a placeholder or contains a placeholder.""" if not isinstance(items, (tuple, list)): items = (items,) return all(item is not _none for item in items) def _filter_none(items): """Filter special placeholder value, preserves sequence type.""" type_ = list if isinstance(items, list) else tuple return type_(filter(_not_none, items)) def _getitem(item, i): """Extract value or values from dicts. Covers the case of a single key or multiple keys. If not found, return placeholders instead. """ if not isinstance(i, (tuple, list)): return item.get(i, _none) type_ = list if isinstance(item, list) else tuple return type_(item.get(j, _none) for j in i) def _unpack_index(i): """Unpack index and return exactly four elements. If index is more shallow than 4, return None for trailing dimensions. If index is deeper than 4, raise a KeyError. """ if len(i) > 4: raise KeyError( "Tried to index history with {} indices but only " "4 indices are possible.".format(len(i))) # fill trailing indices with None i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i))) return i_e, k_e, i_b, k_b class History(list): """History contains the information about the training history of a :class:`.NeuralNet`, facilitating some of the more common tasks that are occur during training. When you want to log certain information during training (say, a particular score or the norm of the gradients), you should write them to the net's history object. It is basically a list of dicts for each epoch, that, again, contains a list of dicts for each batch. For convenience, it has enhanced slicing notation and some methods to write new items. To access items from history, you may pass a tuple of up to four items: 1. Slices along the epochs. 2. 
Selects columns from history epochs, may be a single one or a tuple of column names. 3. Slices along the batches. 4. Selects columns from history batchs, may be a single one or a tuple of column names. You may use a combination of the four items. If you select columns that are not present in all epochs/batches, only those epochs/batches are chosen that contain said columns. If this set is empty, a ``KeyError`` is raised. Examples -------- >>> # ACCESSING ITEMS >>> # history of a fitted neural net >>> history = net.history >>> # get current epoch, a dict >>> history[-1] >>> # get train losses from all epochs, a list of floats >>> history[:, 'train_loss'] >>> # get train and valid losses from all epochs, a list of tuples >>> history[:, ('train_loss', 'valid_loss')] >>> # get current batches, a list of dicts >>> history[-1, 'batches'] >>> # get latest batch, a dict >>> history[-1, 'batches', -1] >>> # get train losses from current batch, a list of floats >>> history[-1, 'batches', :, 'train_loss'] >>> # get train and valid losses from current batch, a list of tuples >>> history[-1, 'batches', :, ('train_loss', 'valid_loss')] >>> # WRITING ITEMS >>> # add new epoch row >>> history.new_epoch() >>> # add an entry to current epoch >>> history.record('my-score', 123) >>> # add a batch row to the current epoch >>> history.new_batch() >>> # add an entry to the current batch >>> history.record_batch('my-batch-score', 456) >>> # overwrite entry of current batch >>> history.record_batch('my-batch-score', 789) """ def new_epoch(self): """Register a new epoch row.""" self.append({'batches': []}) def new_batch(self): """Register a new batch row for the current epoch.""" # pylint: disable=invalid-sequence-index self[-1]['batches'].append({}) def record(self, attr, value): """Add a new value to the given column for the current epoch. """ msg = "Call new_epoch before recording for the first time." 
if not self: raise ValueError(msg) self[-1][attr] = value def record_batch(self, attr, value): """Add a new value to the given column for the current batch. """ # pylint: disable=invalid-sequence-index self[-1]['batches'][-1][attr] = value def to_list(self): """Return history object as a list.""" return list(self) @classmethod def from_file(cls, f): """Load the history of a ``NeuralNet`` from a json file. Parameters ---------- f : file-like object or str """ with open_file_like(f, 'r') as fp: return cls(json.load(fp)) def to_file(self, f): """Saves the history as a json file. In order to use this feature, the history must only contain JSON encodable Python data structures. Numpy and PyTorch types should not be in the history. Parameters ---------- f : file-like object or str """ with open_file_like(f, 'w') as fp: json.dump(self.to_list(), fp) def __getitem__(self, i): # This implementation resolves indexing backwards, # i.e. starting from the batches, then progressing to the # epochs. if isinstance(i, (int, slice)): i = (i,) # i_e: index epoch, k_e: key epoch # i_b: index batch, k_b: key batch i_e, k_e, i_b, k_b = _unpack_index(i) keyerror_msg = "Key '{}' was not found in history." 
if i_b is not None and k_e != 'batches': raise KeyError("History indexing beyond the 2nd level is " "only possible if key 'batches' is used, " "found key '{}'.".format(k_e)) items = self.to_list() # extract indices of batches # handles: history[..., k_e, i_b] if i_b is not None: items = [row[k_e][i_b] for row in items] # extract keys of batches # handles: history[..., k_e, i_b][k_b] if k_b is not None: items = [ _filter_none([_getitem(b, k_b) for b in batches]) if isinstance(batches, (list, tuple)) else _getitem(batches, k_b) for batches in items ] # get rid of empty batches items = [b for b in items if b not in (_none, [], ())] if not _filter_none(items): # all rows contained _none or were empty raise KeyError(keyerror_msg.format(k_b)) # extract epoch-level values, but only if not already done # handles: history[..., k_e] if (k_e is not None) and (i_b is None): items = [_getitem(batches, k_e) for batches in items] if not _filter_none(items): raise KeyError(keyerror_msg.format(k_e)) # extract the epochs # handles: history[i_b, ..., ..., ...] if i_e is not None: items = items[i_e] if isinstance(i_e, slice): items = _filter_none(items) if items is _none: raise KeyError(keyerror_msg.format(k_e)) return items
{"/skorch/tests/test_regressor.py": ["/skorch/exceptions.py"], "/skorch/tests/test_history.py": ["/skorch/history.py"]}
57,723
farizrahman4u/skorch
refs/heads/master
/skorch/exceptions.py
"""Contains skorch-specific exceptions and warnings.""" class SkorchException(BaseException): """Base skorch exception.""" class NotInitializedError(SkorchException): """Module is not initialized, please call the ``.initialize`` method or train the model by calling ``.fit(...)``. """ class SkorchWarning(UserWarning): """Base skorch warning.""" class DeviceWarning(SkorchWarning): """A problem with a device (e.g. CUDA) was detected."""
{"/skorch/tests/test_regressor.py": ["/skorch/exceptions.py"], "/skorch/tests/test_history.py": ["/skorch/history.py"]}
57,724
farizrahman4u/skorch
refs/heads/master
/skorch/tests/test_regressor.py
"""Tests for regressor.py Only contains tests that are specific for regressor subclasses. """ import numpy as np import pytest from sklearn.base import clone from skorch.tests.conftest import INFERENCE_METHODS class TestNeuralNetRegressor: @pytest.fixture(scope='module') def data(self, regression_data): return regression_data @pytest.fixture(scope='module') def module_cls(self): from skorch.toy import make_regressor return make_regressor(dropout=0.5) @pytest.fixture(scope='module') def net_cls(self): from skorch import NeuralNetRegressor return NeuralNetRegressor @pytest.fixture(scope='module') def net(self, net_cls, module_cls): return net_cls( module_cls, max_epochs=20, lr=0.1, ) @pytest.fixture(scope='module') def multioutput_module_cls(self): from skorch.toy import make_regressor return make_regressor(output_units=3, dropout=0.5) @pytest.fixture(scope='module') def multioutput_net(self, net_cls, multioutput_module_cls): return net_cls( multioutput_module_cls, max_epochs=1, lr=0.1, ) @pytest.fixture(scope='module') def net_fit(self, net, data): # Careful, don't call additional fits on this, since that would have # side effects on other tests. X, y = data return net.fit(X, y) def test_clone(self, net_fit): clone(net_fit) def test_fit(self, net_fit): # fitting does not raise anything pass @pytest.mark.parametrize('method', INFERENCE_METHODS) def test_not_fitted_raises(self, net_cls, module_cls, data, method): from skorch.exceptions import NotInitializedError net = net_cls(module_cls) X = data[0] with pytest.raises(NotInitializedError) as exc: # we call `list` because `forward_iter` is lazy list(getattr(net, method)(X)) msg = ("This NeuralNetRegressor instance is not initialized " "yet. 
Call 'initialize' or 'fit' with appropriate arguments " "before using this method.") assert exc.value.args[0] == msg def test_net_learns(self, net, net_cls, data, module_cls): X, y = data net = net_cls( module_cls, max_epochs=10, lr=0.1, ) net.fit(X, y) train_losses = net.history[:, 'train_loss'] assert train_losses[0] > 2 * train_losses[-1] def test_history_default_keys(self, net_fit): expected_keys = {'train_loss', 'valid_loss', 'epoch', 'dur', 'batches'} for row in net_fit.history: assert expected_keys.issubset(row) def test_target_1d_raises(self, net, data): X, y = data with pytest.raises(ValueError) as exc: net.fit(X, y.flatten()) assert exc.value.args[0] == ( "The target data shouldn't be 1-dimensional but instead have " "2 dimensions, with the second dimension having the same size " "as the number of regression targets (usually 1). Please " "reshape your target data to be 2-dimensional " "(e.g. y = y.reshape(-1, 1).") def test_predict_predict_proba(self, net_fit, data): X = data[0] y_pred = net_fit.predict(X) # predictions should not be all zeros assert not np.allclose(y_pred, 0) y_proba = net_fit.predict_proba(X) # predict and predict_proba should be identical for regression assert np.allclose(y_pred, y_proba, atol=1e-6) def test_score(self, net_fit, data): X, y = data r2_score = net_fit.score(X, y) assert r2_score <= 1. def test_multioutput_score(self, multioutput_net, multioutput_regression_data): X, y = multioutput_regression_data multioutput_net.fit(X, y) r2_score = multioutput_net.score(X, y) assert r2_score <= 1.
{"/skorch/tests/test_regressor.py": ["/skorch/exceptions.py"], "/skorch/tests/test_history.py": ["/skorch/history.py"]}
57,725
farizrahman4u/skorch
refs/heads/master
/skorch/tests/test_history.py
"""Tests for history.py.""" import pytest from skorch.history import History class TestHistory: test_epochs = 3 test_batches = 4 @pytest.fixture def history(self): """Return a history filled with epoch and batch data.""" h = History() for num_epoch in range(self.test_epochs): h.new_epoch() h.record('duration', 1) h.record('total_loss', num_epoch + self.test_batches) if num_epoch == 2: h.record('extra', 42) for num_batch in range(self.test_batches): h.new_batch() h.record_batch('loss', num_epoch + num_batch) if num_batch % 2 == 0 and (num_epoch + 1) != self.test_epochs: h.record_batch('extra_batch', 23) return h @pytest.fixture def ref(self, history): return history.to_list() def test_list_initialization(self): h = History([1, 2, 3]) assert len(h) == 3 def test_history_length(self, history): assert len(history) == self.test_epochs # we expect to have the extracted batches for each epoch assert len(history[:, 'batches']) == self.test_epochs def test_history_epoch_column(self, history, ref): total_losses = history[:, 'total_loss'] total_losses_ref = [n['total_loss'] for n in ref] assert total_losses == total_losses_ref def test_history_epoch_two_columns(self, history, ref): duration_with_losses = history[:, ('total_loss', 'duration')] total_losses_ref = [n['total_loss'] for n in ref] durations_ref = [n['duration'] for n in ref] expected = list(zip(total_losses_ref, durations_ref)) assert duration_with_losses == expected def test_history_epoch_two_columns_different_order(self, history, ref): duration_with_losses = history[:, ('duration', 'total_loss')] total_losses_ref = [n['total_loss'] for n in ref] durations_ref = [n['duration'] for n in ref] expected = list(zip(durations_ref, total_losses_ref)) assert duration_with_losses == expected def test_history_partial_index(self, history, ref): extra = history[:, 'extra'] assert len(extra) == 1 # we retrieve 'extra' from a slice, therefore we expect a list as result assert extra == [ref[2]['extra']] def 
test_history_partial_and_full_index(self, history, ref): total_loss_with_extra = history[:, ('total_loss', 'extra')] assert len(total_loss_with_extra) == 1 assert total_loss_with_extra[0][0] == ref[2]['total_loss'] assert total_loss_with_extra[0][1] == ref[2]['extra'] def test_history_partial_join_list(self, history, ref): total = history[:, ['total_loss', 'extra', 'batches']] # there's only one epoch with the 'extra' key. assert len(total) == 1 assert total[0][0] == ref[2]['total_loss'] assert total[0][1] == ref[2]['extra'] assert total[0][2] == ref[2]['batches'] def test_history_retrieve_single_value(self, history, ref): total_loss_0 = history[0, 'total_loss'] assert total_loss_0 == ref[0]['total_loss'] def test_history_retrieve_multiple_values(self, history, ref): total_loss_0_to_1 = history[0:1, 'total_loss'] assert total_loss_0_to_1 == [n['total_loss'] for n in ref[0:1]] def test_history_non_existing_values(self, history): with pytest.raises(KeyError): # pylint: disable=pointless-statement history[:, 'non-existing'] with pytest.raises(KeyError): # pylint: disable=pointless-statement history[0, 'extra'] def test_history_non_existing_values_batch(self, history): with pytest.raises(KeyError): # pylint: disable=pointless-statement history[:, 'batches', :, 'non-existing'] with pytest.raises(KeyError): # pylint: disable=pointless-statement history[:, 'batches', 1, 'extra_batch'] def test_history_mixed_slicing(self, history, ref): losses = history[:, 'batches', 0, 'loss'] assert len(losses) == self.test_epochs assert losses == [epoch['batches'][0]['loss'] for epoch in ref] losses = history[0, 'batches', :, 'loss'] assert losses == [batch['loss'] for batch in ref[0]['batches']] def test_history_partial_and_full_index_batches(self, history, ref): loss_with_extra = history[:, 'batches', :, ('loss', 'extra_batch')] expected_e0 = [(b['loss'], b['extra_batch']) for b in ref[0]['batches'] if 'extra_batch' in b] expected_e1 = [(b['loss'], b['extra_batch']) for b in 
ref[1]['batches'] if 'extra_batch' in b] assert len(loss_with_extra) == self.test_epochs - 1 assert loss_with_extra[0] == expected_e0 assert loss_with_extra[1] == expected_e1 def test_history_partial_batches_batch_key_3rd(self, history, ref): extra_batches = history[:, 'batches', :, 'extra_batch'] expected_e0 = [b['extra_batch'] for b in ref[0]['batches'] if 'extra_batch' in b] expected_e1 = [b['extra_batch'] for b in ref[1]['batches'] if 'extra_batch' in b] # In every epoch there are 2 batches with the 'extra_batch' # key except for the last epoch. We therefore two results # of which one of them is an empty list. assert len(extra_batches) == self.test_epochs - 1 assert extra_batches[0] == expected_e0 assert extra_batches[1] == expected_e1 def test_history_partial_batches_batch_key_4th(self, history, ref): extra_batches = history[:, 'batches', :, 'extra_batch'] expected_e0 = [b['extra_batch'] for b in ref[0]['batches'] if 'extra_batch' in b] expected_e1 = [b['extra_batch'] for b in ref[1]['batches'] if 'extra_batch' in b] # In every epoch there are 2 batches with the 'extra_batch' # key except for the last epoch. We therefore two results # of which one of them is an empty list. 
assert len(extra_batches) == self.test_epochs - 1 assert extra_batches[0] == expected_e0 assert extra_batches[1] == expected_e1 def test_history_partial_singular_values(self, history): values = history[-1, ('duration', 'total_loss')] expected = (history[-1]['duration'], history[-1]['total_loss']) # pylint: disable=unidiomatic-typecheck assert type(values) == tuple assert values == expected def test_history_slice_beyond_batches_but_key_not_batches(self, history): with pytest.raises(KeyError) as exc: # pylint: disable=pointless-statement history[:, 'not-batches', 0] msg = exc.value.args[0] expected = ("History indexing beyond the 2nd level is " "only possible if key 'batches' is used, " "found key 'not-batches'.") assert msg == expected def test_history_with_invalid_epoch_key(self, history): key = slice(None), 'not-batches' with pytest.raises(KeyError) as exc: # pylint: disable=pointless-statement history[key] msg = exc.value.args[0] expected = "Key 'not-batches' was not found in history." assert msg == expected def test_history_too_many_indices(self, history): with pytest.raises(KeyError) as exc: # pylint: disable=pointless-statement history[:, 'batches', :, 'train_loss', :] msg = exc.value.args[0] expected = ("Tried to index history with 5 indices but only " "4 indices are possible.") assert msg == expected def test_history_save_load_cycle_file_obj(self, history, tmpdir): history_f = tmpdir.mkdir('skorch').join('history.json') with open(str(history_f), 'w') as f: history.to_file(f) with open(str(history_f), 'r') as f: new_history = History.from_file(f) assert history == new_history def test_history_save_load_cycle_file_path(self, history, tmpdir): history_f = tmpdir.mkdir('skorch').join('history.json') history.to_file(str(history_f)) new_history = History.from_file(str(history_f)) assert history == new_history
{"/skorch/tests/test_regressor.py": ["/skorch/exceptions.py"], "/skorch/tests/test_history.py": ["/skorch/history.py"]}
57,733
futahi/tsar
refs/heads/master
/secapp/migrations/0010_auto_20190909_1859.py
# Generated by Django 2.2.4 on 2019-09-09 15:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0009_auto_20190909_1841'), ] operations = [ migrations.RenameField( model_name='last', old_name='dost_alter', new_name='last_alter', ), migrations.RemoveField( model_name='last', name='dost_desc', ), migrations.RemoveField( model_name='last', name='dost_link', ), migrations.AddField( model_name='last', name='last_desc', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='last', name='last_file', field=models.FileField(blank=True, null=True, upload_to='Services'), ), migrations.AddField( model_name='last', name='last_link1', field=models.URLField(blank=True, null=True), ), migrations.AddField( model_name='last', name='last_link2', field=models.URLField(blank=True, null=True), ), migrations.AddField( model_name='last', name='last_reference', field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name='last', name='last_image', field=models.ImageField(blank=True, null=True, upload_to='Services'), ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,734
futahi/tsar
refs/heads/master
/secapp/migrations/0006_services.py
# Generated by Django 2.2.4 on 2019-09-09 11:23 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0005_dost'), ] operations = [ migrations.CreateModel( name='Services', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('service_name', models.CharField(max_length=100)), ('service_image', models.ImageField(upload_to='Services')), ('service_alter', models.CharField(max_length=50)), ('service_desc', models.TextField()), ('service_link', models.URLField()), ], ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,735
futahi/tsar
refs/heads/master
/secapp/admin.py
from django.contrib import admin from .models import Bottom from .models import Dost from .models import Services from .models import Offers from .models import Last, Famous, City admin.site.register(Bottom) admin.site.register(Dost) admin.site.register(Services) admin.site.register(Offers) admin.site.register(Last) admin.site.register(Famous) admin.site.register(City) # Register your models here.
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,736
futahi/tsar
refs/heads/master
/secapp/migrations/0009_auto_20190909_1841.py
# Generated by Django 2.2.4 on 2019-09-09 15:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0008_bottom_lin'), ] operations = [ migrations.CreateModel( name='Last', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('last_name', models.CharField(max_length=100)), ('last_image', models.ImageField(upload_to='Services')), ('dost_alter', models.CharField(max_length=50)), ('dost_desc', models.TextField()), ('dost_link', models.URLField()), ], ), migrations.AlterField( model_name='offers', name='offer_link', field=models.URLField(blank=True, null=True), ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,737
futahi/tsar
refs/heads/master
/secapp/migrations/0004_auto_20190905_0119.py
# Generated by Django 2.2.4 on 2019-09-04 22:19 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('secapp', '0003_auto_20190905_0041'), ] operations = [ migrations.RenameField( model_name='bottom', old_name='alter1', new_name='alter', ), migrations.RenameField( model_name='bottom', old_name='desc1', new_name='desc', ), migrations.RenameField( model_name='bottom', old_name='img1', new_name='img', ), migrations.RenameField( model_name='bottom', old_name='name1', new_name='name', ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,738
futahi/tsar
refs/heads/master
/secondsite/wsgi.py
"""WSGI entry point for the secondsite project.

Adds the project directory to ``sys.path`` (needed on shared hosting,
where the WSGI file lives outside the project tree), points Django at the
settings module, and exposes the ``application`` callable that WSGI
servers look for.

Fix: the original instantiated ``django.core.handlers.wsgi.WSGIHandler()``
directly after a manual ``django.setup()`` -- the dated pre-1.4 pattern.
``get_wsgi_application()`` performs setup itself and is the documented
replacement (the original file already carried it, commented out).
"""
import os
import sys

# Project root on the deployment host; must contain the secondsite/ package.
PROJECT_PATH = '/home/futahi1/tsar/'
if PROJECT_PATH not in sys.path:
    sys.path.append(PROJECT_PATH)
# Relative paths in settings (e.g. a SQLite database file) resolve from here.
os.chdir(PROJECT_PATH)

# Tell Django where the settings module lives.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'secondsite.settings')

# get_wsgi_application() calls django.setup() internally, so no explicit
# setup()/WSGIHandler() dance is needed.
from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,739
futahi/tsar
refs/heads/master
/secapp/migrations/0005_dost.py
# Generated by Django 2.2.4 on 2019-09-08 02:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0004_auto_20190905_0119'), ] operations = [ migrations.CreateModel( name='Dost', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dost_name', models.CharField(max_length=100)), ('dost_image', models.ImageField(upload_to='Services')), ('dost_alter', models.CharField(max_length=50)), ('dost_desc', models.TextField()), ('dost_link', models.URLField()), ], ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,740
futahi/tsar
refs/heads/master
/secapp/migrations/0013_city_famous.py
# Generated by Django 2.2.4 on 2019-10-25 21:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0012_auto_20190909_2303'), ] operations = [ migrations.CreateModel( name='City', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('city_name', models.CharField(max_length=100)), ('city_image', models.ImageField(upload_to='Services')), ('city_alter', models.CharField(max_length=50)), ('city_desc', models.TextField()), ('city_link', models.CharField(blank=True, max_length=150, null=True)), ], ), migrations.CreateModel( name='Famous', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fame_name', models.CharField(max_length=100)), ('fame_image', models.ImageField(upload_to='Services')), ('fame_alter', models.CharField(max_length=50)), ('fame_desc', models.TextField()), ('fame_link', models.CharField(blank=True, max_length=150, null=True)), ], ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,741
futahi/tsar
refs/heads/master
/secapp/urls.py
"""URL routes for secapp.

Route names are referenced from templates via ``{% url %}`` -- keep the
``name=`` values stable.
"""
from django.urls import path
from secapp import views

urlpatterns = [
    # Landing and static-ish pages.
    path('', views.home, name='home'),
    path('ourservices/', views.ourservices, name='ourservices'),
    path('ourteam/', views.ourteam, name='ourteam'),
    path('tovisit/', views.tovisit, name='tovisit'),
    # Listing pages for each content model.
    path('dosto/', views.dosto, name='dosto'),
    path('famous/', views.famous, name='famous'),
    path('cities/', views.city, name='cities'),
    path('offers/', views.offers, name='offers'),
    path('services_details/<int:service_id>/', views.services_details, name='services_details'),
    path('about/', views.about, name='about'),
    path('visa/', views.visa, name='visa'),
    path('health/', views.health, name='health'),
    # path('dosto/hi/', views.hi, name='hi'),
    path('guide/', views.guide, name='guide'),
    #Detils pages
    path('dosto_details/<int:dosto_id>/', views.dosto_details, name='dosto_details'),
    path('city_details/<int:city_id>/', views.city_details, name='city_details'),
    path('face_details/<int:fame_id>/', views.face_details, name='face_details'),
    path('offer_details/<int:offer_id>/', views.offer_details, name='offer_details'),
]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,742
futahi/tsar
refs/heads/master
/secapp/views.py
"""View functions for secapp: landing, listing, static, and detail pages."""
from django.shortcuts import render, get_object_or_404
from django.utils.safestring import mark_safe

from .models import Bottom
from .models import Last
from .models import Offers
from .models import Services
from .models import Dost, City, Famous


def index(request):
    """Render the bare index page."""
    return render(request, 'secapp/index.html')


def home(request):
    """Landing page: all offers, services, and Bottom entries."""
    return render(request, 'secapp/home.html', {
        'title': ' الرئيسية',
        'offers': Offers.objects.all(),
        'services': Services.objects.all(),
        'bottoms': Bottom.objects.all(),
    })


def ourservices(request):
    """List every Services row."""
    return render(request, 'secapp/ourservices.html', {
        'title': 'خدمات',
        'services': Services.objects.all(),
    })


def offers(request):
    """List every Offers row."""
    return render(request, 'secapp/offers.html', {
        'title': 'عروض',
        'offers': Offers.objects.all(),
    })


def ourteam(request):
    """Static team page."""
    return render(request, 'secapp/ourteam.html')


def tovisit(request):
    """List Bottom entries (places to visit)."""
    return render(request, 'secapp/tovisit.html', {
        'title': 'روسيا',
        'bottoms': Bottom.objects.all(),
    })


def dosto(request):
    """List every Dost row."""
    return render(request, 'secapp/dosto.html', {'dosts': Dost.objects.all()})


def city(request):
    """List every City row."""
    return render(request, 'secapp/cities.html', {
        'cities': City.objects.all(),
        'title': 'مدن',
    })


def famous(request):
    """List every Famous row."""
    return render(request, 'secapp/famous.html', {
        'fames': Famous.objects.all(),
        'title': 'شخصيات',
    })


def about(request):
    """Static about page."""
    return render(request, 'secapp/about.html', {'title': ' من نحن'})


def hi(request):
    """Static page; its route is currently commented out in urls.py."""
    return render(request, 'secapp/hi.html')


def health(request):
    """Static medical-travel page."""
    return render(request, 'secapp/health.html', {'title': ' السفر للعلاج '})


def guide(request):
    """Static guide page."""
    return render(request, 'secapp/guide.html')


def visa(request):
    """Static visa page."""
    return render(request, 'secapp/visa.html')


# --- detail pages -----------------------------------------------------------

def _detail(request, model, pk, context_key, template):
    """Shared detail renderer: 404 on a missing pk; the object doubles as
    the page title (via its __str__)."""
    obj = get_object_or_404(model, pk=pk)
    return render(request, template, {'title': obj, context_key: obj})


def offer_details(request, offer_id):
    """Detail page for a single offer."""
    return _detail(request, Offers, offer_id, 'offer', 'secapp/offer_details.html')


def services_details(request, service_id):
    """Detail page for a single service."""
    return _detail(request, Services, service_id, 'service', 'secapp/services_details.html')


def dosto_details(request, dosto_id):
    """Detail page for a single Dost entry."""
    return _detail(request, Dost, dosto_id, 'dost', 'secapp/dosto_details.html')


def city_details(request, city_id):
    """Detail page for a single city."""
    return _detail(request, City, city_id, 'city', 'secapp/city_details.html')


def face_details(request, fame_id):
    """Detail page for a single famous person."""
    return _detail(request, Famous, fame_id, 'fame', 'secapp/face_details.html')
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,743
futahi/tsar
refs/heads/master
/secapp/migrations/0011_auto_20190909_2257.py
# Generated by Django 2.2.4 on 2019-09-09 19:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0010_auto_20190909_1859'), ] operations = [ migrations.AlterField( model_name='bottom', name='lin', field=models.CharField(blank=True, max_length=150, null=True), ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,744
futahi/tsar
refs/heads/master
/secapp/migrations/0012_auto_20190909_2303.py
# Generated by Django 2.2.4 on 2019-09-09 20:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0011_auto_20190909_2257'), ] operations = [ migrations.AlterField( model_name='dost', name='dost_link', field=models.CharField(blank=True, max_length=150, null=True), ), migrations.AlterField( model_name='offers', name='offer_link', field=models.CharField(blank=True, max_length=150, null=True), ), migrations.AlterField( model_name='services', name='service_link', field=models.CharField(blank=True, max_length=150, null=True), ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,745
futahi/tsar
refs/heads/master
/secapp/models.py
"""Database models for secapp.

All content models follow the same pattern: name / image / alter (alt
text) / desc / optional link. Every ImageField uploads into the shared
``Services`` media subdirectory.

Fix: the original file assigned a stray class attribute
``primary_key=True`` on most models. It was a plain boolean attribute,
not a Field, so it had no effect (Django adds the implicit ``id``
AutoField regardless); it has been removed as misleading dead code.
"""
from django.db import models


class Offers(models.Model):
    """A promotional offer shown on the home and offers pages."""
    offer_name = models.CharField(max_length=100)
    offer_image = models.ImageField(upload_to='Services')
    offer_alter = models.CharField(max_length=50)
    offer_desc = models.TextField()
    offer_link = models.CharField(max_length=150, null=True, blank=True)

    def __str__(self):
        return self.offer_name


class Services(models.Model):
    """A service the agency provides."""
    service_name = models.CharField(max_length=100)
    service_image = models.ImageField(upload_to='Services')
    service_alter = models.CharField(max_length=50)
    service_desc = models.TextField()
    service_link = models.CharField(max_length=150, null=True, blank=True)

    def __str__(self):
        return self.service_name


class Bottom(models.Model):
    """A 'to visit' entry shown on the home and tovisit pages."""
    name = models.CharField(max_length=100)
    alter = models.CharField(max_length=100)
    img = models.ImageField(upload_to='Services')
    desc = models.TextField()
    lin = models.CharField(max_length=150, null=True, blank=True)

    # __str__ was commented out in the original, so the admin shows the
    # default "Bottom object (n)". TODO(review): confirm whether returning
    # self.name should be restored.
    # def __str__(self):
    #     return self.name


class Dost(models.Model):
    """A Dost entry (listing page /dosto/)."""
    dost_name = models.CharField(max_length=100)
    dost_image = models.ImageField(upload_to='Services')
    dost_alter = models.CharField(max_length=50)
    dost_desc = models.TextField()
    dost_link = models.CharField(max_length=150, null=True, blank=True)

    def __str__(self):
        return self.dost_name


class City(models.Model):
    """A city entry (listing page /cities/)."""
    city_name = models.CharField(max_length=100)
    city_image = models.ImageField(upload_to='Services')
    city_alter = models.CharField(max_length=50)
    city_desc = models.TextField()
    city_link = models.CharField(max_length=150, null=True, blank=True)

    def __str__(self):
        return self.city_name


class Famous(models.Model):
    """A famous-person entry (listing page /famous/)."""
    fame_name = models.CharField(max_length=100)
    fame_image = models.ImageField(upload_to='Services')
    fame_alter = models.CharField(max_length=50)
    fame_desc = models.TextField()
    fame_link = models.CharField(max_length=150, null=True, blank=True)

    def __str__(self):
        return self.fame_name


class Last(models.Model):
    """A richer content entry: image and/or file plus up to two links."""
    last_name = models.CharField(max_length=100)
    last_image = models.ImageField(upload_to='Services', null=True, blank=True)
    last_file = models.FileField(upload_to='Services', null=True, blank=True)
    last_alter = models.CharField(max_length=50)
    last_desc = models.TextField(null=True, blank=True)
    last_link1 = models.URLField(max_length=200, null=True, blank=True)
    last_link2 = models.URLField(max_length=200, null=True, blank=True)
    last_reference = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.last_name
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,746
futahi/tsar
refs/heads/master
/secapp/migrations/0007_offers.py
# Generated by Django 2.2.4 on 2019-09-09 12:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0006_services'), ] operations = [ migrations.CreateModel( name='Offers', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('offer_name', models.CharField(max_length=100)), ('offer_image', models.ImageField(upload_to='Services')), ('offer_alter', models.CharField(max_length=50)), ('offer_desc', models.TextField()), ('offer_link', models.URLField()), ], ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,747
futahi/tsar
refs/heads/master
/secapp/migrations/0003_auto_20190905_0041.py
# Generated by Django 2.2.4 on 2019-09-04 21:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secapp', '0002_auto_20190904_2349'), ] operations = [ migrations.AlterField( model_name='bottom', name='img1', field=models.ImageField(upload_to='Services'), ), ]
{"/secapp/admin.py": ["/secapp/models.py"], "/secapp/views.py": ["/secapp/models.py"]}
57,748
ZiyadBastaili/nfts_data_scraping
refs/heads/main
/collections/test.py
"""Manual smoke test: open and display a collection image.

Fix: the imaging library is distributed as ``Pillow`` but imported as
``PIL`` -- ``from Pillow import Image`` raises ModuleNotFoundError.
"""
from PIL import Image

im = Image.open("Fluf World.png")
im.show()  # opens the OS image viewer; for eyeballing only
{"/main_scrape.py": ["/web_scraping/dappRadar_alltable.py"]}
57,749
ZiyadBastaili/nfts_data_scraping
refs/heads/main
/main_scrape.py
"""Entry point: launch the DappRadar NFT-scraping Streamlit app.

Fix: the original called ``run_app()`` unconditionally at import time.
The call is now behind the standard ``__main__`` guard so importing this
module no longer launches the UI. Streamlit executes the target script
as ``__main__``, so script behavior is unchanged.
"""
from web_scraping.dappRadar_alltable import Scrap

# Module-level instance kept so any existing importers of ``app`` still work.
# NOTE(review): Scrap.__init__ performs Streamlit page setup as a side
# effect, so constructing it here is part of the existing contract.
app = Scrap()


def run_app():
    """Render the scraper UI."""
    app.run_app()


if __name__ == "__main__":
    run_app()
{"/main_scrape.py": ["/web_scraping/dappRadar_alltable.py"]}
57,750
ZiyadBastaili/nfts_data_scraping
refs/heads/main
/web_scraping/dappRadar_alltable.py
import time from datetime import datetime import os from bs4 import BeautifulSoup #import twint import pandas as pd #from selenium import webdriver import warnings warnings.filterwarnings('ignore') import streamlit as st import re from GrabzIt import GrabzItClient from GrabzIt import GrabzItImageOptions from GrabzIt import GrabzItTableOptions #from webdriver_manager.chrome import ChromeDriverManager from annotated_text import annotated_text from urllib import request from PIL import Image class Scrap(): def __init__(self): st.set_page_config(page_title='Data scraping from Web Site', layout= "wide") st.markdown(""" <style> footer {visibility: hidden;} </style> """, unsafe_allow_html=True) padding = 3 st.markdown(f""" <style> .reportview-container .main .block-container{{ padding-top: {padding}rem; padding-right: {padding}rem; padding-left: {padding}rem; padding-bottom: {padding}rem; }} </style> """, unsafe_allow_html=True) # button style st.markdown(" <style> .css-2trqyj:focus:not(:active) {border-color: #ffffff;box-shadow: none;color: #ffffff;background-color: #0066cc;}.css-2trqyj:focus:(:active) {border-color: #ffffff;box-shadow: none;color: #ffffff;background-color: #0066cc;}.css-2trqyj:focus:active){background-color: #0066cc;border-color: #ffffff;box-shadow: none;color: #ffffff;background-color: #0066cc;}</style> ", unsafe_allow_html=True) def run_app(self): self.frame() def frame(self): self.title() self.body() self.footer() def title(self): st.image("data/images/background.png", use_column_width=True) def footer(self): st.markdown('<i style="font-size:11px">alpha version 0.1</i>', unsafe_allow_html=True) def navigation(self, nav, name_product = None, link=None): st.session_state.nav = nav st.session_state.name_product = name_product st.session_state.link = link def time_nav(self, c_nav, t_nav): st.session_state.c_nav = c_nav st.session_state.t_nav = t_nav def body(self): if 'nav' not in st.session_state: st.session_state.nav = 'Home' if st.session_state.nav == 
'Home': st.title('NFTs Overview') st.markdown("<h3>Discover the hottest NFT collections, marketplace rankings, and top real-time sales</h3>", unsafe_allow_html=True) a = self.image_scraping("https://dappradar.com/nft", 'top_15_collections', targetElement = '.sc-aKZfe', hideElement = '#om-nrz2xrzgki288pvo2jhx') b = self.image_scraping("https://dappradar.com/nft", 'top_5_sales', targetElement = '.sc-gIRixj', hideElement = '#om-nrz2xrzgki288pvo2jhx') c = self.image_scraping("https://dappradar.com/nft", 'top_5_marketplaces', targetElement = '.sc-kmASHI', hideElement = '#om-nrz2xrzgki288pvo2jhx') col1, col2, col3 = st.columns((1.5,0.2,1.1)) with col1: st.markdown("<h1> Top 15 Collections </h1>", unsafe_allow_html=True) st.image(a, use_column_width=True) st.button("Show All (More than 190)", key='Collections', on_click=self.navigation, args=('Collections', )) with col3: st.markdown("<h1> Top 5 Sales </h1>", unsafe_allow_html=True) st.image(b, use_column_width=True) st.button("Show All", key='Sales', on_click=self.navigation, args=('Sales', )) st.markdown("<br>", unsafe_allow_html=True) st.markdown("<h1> Top 5 Marketplaces </h1>", unsafe_allow_html=True) st.image(c, use_column_width=True) st.button("Show All (Top 25)", key='Marketplaces', on_click=self.navigation, args=('Marketplaces', )) if st.session_state.nav == 'Collections': if 'c_nav' not in st.session_state: st.session_state.c_nav = 'table table-hover js-top-by-sales-table-24h summary-sales-table' st.session_state.t_nav = '24h' if 'link' not in st.session_state: st.session_state.link = '' st.title("Top Collections ") st.markdown("<h3>Rankings for NFT collections. 
Discover the top NFT collections across multiple protocols including Ethereum, BSC, WAX and Flow <br><br><br></h3>", unsafe_allow_html=True) #self.image_scraping("https://cryptoslam.io/", 'all_collections', targetElement = '.table') col1, col2, col3 = st.columns((1,1,5)) col1.button('↩️ NFTs Overview', key='Home', on_click=self.navigation, args=('Home',)) col2.button('💰 Marketplaces', key='Collections', on_click=self.navigation, args=('Marketplaces',)) df = self.get_collections(st.session_state.c_nav,st.session_state.t_nav) st.markdown('<br><br>', unsafe_allow_html=True) st.markdown(annotated_text(("The Data was obtained at the time:", "", "#faa"), " ", str(datetime.now())), unsafe_allow_html=True) st.markdown('<br><br>', unsafe_allow_html=True) c0, c1, c2, c3, c4 = st.columns((6,1, 1, 1, 1)) c1.button("24 hours", key='24 hours', on_click=self.time_nav, args=('table table-hover js-top-by-sales-table-24h summary-sales-table','24h',)) c2.button("7 days", key='7 days', on_click=self.time_nav, args=('table table-hover js-top-by-sales-table-7d summary-sales-table', '7d',)) c3.button("30 days", key='30 days', on_click=self.time_nav, args=('table table-hover js-top-by-sales-table-30d summary-sales-table', '30d',)) c4.button("All time", key='All time', on_click=self.time_nav, args=('table table-hover js-top-by-sales-table-all summary-sales-table','all time',)) st.markdown('-----') c1, c2, c3, c4, c5, c6, c7, c8, c9 = st.columns((0.5, 0.3, 1, 1, 1, 0.9, 1, 1, 0.6)) c1.markdown(annotated_text(('RANK', "", "#ffff"), "", ''), unsafe_allow_html=True) c3.markdown(annotated_text(('PRODUCT', "", "#ffff"), "", ''), unsafe_allow_html=True) c4.markdown(annotated_text(('SALES', "", "#ffff"), "", ''), unsafe_allow_html=True) if st.session_state.t_nav != 'all time': c5.markdown(annotated_text(('Change ('+st.session_state.t_nav+')', "", "#ffff"), "", ''), unsafe_allow_html=True) else: c5.markdown(annotated_text(('OWNERS', "", "#ffff"), "", ''), unsafe_allow_html=True) 
c6.markdown(annotated_text(('BUYERS', "", "#ffff"), "", ''), unsafe_allow_html=True) c7.markdown(annotated_text(('TRANSACTIONS', "", "#ffff"), "", ''), unsafe_allow_html=True) c8.markdown(annotated_text(('PROTOCOLE', "", "#ffff"), "", ''), unsafe_allow_html=True) c9.markdown(annotated_text(('ANALYSE', "", "#ffff"), "", ''), unsafe_allow_html=True) st.markdown('-----') for i in range(len(df)): RANK = df.loc[i, 'RANK'] PRODUCT = df.loc[i, 'PRODUCT'] SALES = df.loc[i, 'SALES'] if st.session_state.t_nav != 'all time': Change = df.loc[i, 'Change ('+st.session_state.t_nav+')'] else: OWNERS = df.loc[i, 'OWNERS'] BUYERS = df.loc[i, 'BUYERS'] TRANSACTIONS = df.loc[i, 'TRANSACTIONS'] COLOR = df.loc[i, 'COLOR'] PROTOCOLE = df.loc[i, 'PROTOCOLE'] LINK = df.loc[i, 'LINK'] c1,c2,c3,c4,c5,c6,c7,c8,c9 = st.columns((0.5,0.3,1,1,1,1,1,1,0.5)) c1.markdown(annotated_text((RANK, "", "#ffff"), "",''), unsafe_allow_html=True) try: c2.image("collections/"+PRODUCT+".png", use_column_width=True) except: try: img = Image.open("collections/" + PRODUCT + ".ico") img.save("collections/" + PRODUCT + ".png", 'png') c2.image("collections/" + PRODUCT + ".png", use_column_width=True) except: c2.image("collections/blank.png", use_column_width=True) c3.write(PRODUCT) c4.write(SALES) if COLOR == '#ca2d2d': c5.write('🔻 ' + Change) elif COLOR== '#1d8843': c5.write('🟩 ' + Change) else: c5.write('🟡 ' + OWNERS) c6.write(BUYERS) c7.write(TRANSACTIONS) if PROTOCOLE == '/img/ethereum-logo.png': c8.write('ETHEREUM') elif PROTOCOLE == '/img/ronin-logo.png': c8.write('RONIN') elif PROTOCOLE == '/img/flow-logo.png': c8.write('FLOW') elif PROTOCOLE == '/img/polygon-logo.png': c8.write('POLYGON') elif PROTOCOLE == '/img/wax-logo.png': c8.write('WAX') elif PROTOCOLE == '/img/bsc-logo.png': c8.write('BSC') else: c8.write('other') c9.button("🔍", key='Magnifying Glass '+str(i), on_click=self.navigation, args=('Analyse',PRODUCT,LINK,)) st.markdown('-----') # icon_list = df['ICONS'].tolist() # icon_name = 
df['PRODUCT'].tolist() # i = 0 # path = re.sub("\\\web_scraping", "", os.path.abspath("collections")) # for url in icon_list: # try: # r = requests.get(url, allow_redirects=True) # # if url[-3:] == 'ico': # open(path +'\\'+icon_name[i]+'.ico', 'wb').write(r.content) # else: # open(path +'\\'+icon_name[i]+'.png', 'wb').write(r.content) # except: # print('error in '+str(i)) # i+=1 st.write(df) # col1, col2, col3, col4 = st.columns((0.1,1,0.3,0.1)) # col2.image("tables_of_collections/all_collections.png", use_column_width=True) # col3.image("tables_of_collections/protocols.png", use_column_width=True) # df = self.top_collections('https://dappradar.com/nft/collections') # st.write(df) if st.session_state.nav == 'Sales': st.title("Top Sales ") st.markdown("<br><br><br>", unsafe_allow_html=True) col1, col2, col3 = st.columns((1,1,5)) col1.button('↩️ NFTs Overview', key='Home', on_click=self.navigation, args=('Home',)) col2.button('💎 Collections', key='Collections', on_click=self.navigation, args=('Collections',)) if st.session_state.nav == 'Marketplaces': st.title("Top 25 Marketplaces (24h)") st.markdown("<h3>NFT marketplace rankings. Find non-fungible token trading volumes, number of traders per NFT marketplace and more key metrics. 
<br><br><br></h3>", unsafe_allow_html=True) col1, col2, col3 = st.columns((1,1,5)) col1.button('↩️ NFTs Overview', key='Home', on_click=self.navigation, args=('Home',)) col2.button('💎 Collections', key='Collections', on_click=self.navigation, args=('Collections',)) self.image_scraping("https://dappradar.com/nft/marketplaces/1", 'all_marketplaces', targetElement='.sc-iIEYCM', hideElement='#om-nrz2xrzgki288pvo2jhx') col1, col2, col3 = st.columns((0.1, 1, 0.1)) with col2: st.image("tables_of_collections/all_marketplaces.png", use_column_width=True) if st.session_state.nav == 'Analyse': c1, c2 = st.columns((0.7, 5)) try: c1.image("collections/" + st.session_state.name_product + ".png", use_column_width=True) except: try: img = Image.open("collections/" + st.session_state.name_product + ".ico") img.save("collections/" + st.session_state.name_product + ".png", 'png') c1.image("collections/" + st.session_state.name_product + ".png", use_column_width=True) except: c1.image("collections/blank.png", use_column_width=True) c2.title(st.session_state.name_product + ' NFTs statistics') c2.markdown(st.session_state.name_product +' sales volume data, graphs & charts ', unsafe_allow_html=True) col1, col2, col3 = st.columns((1,1,5)) col1.button('↩️ NFTs Overview', key='Home', on_click=self.navigation, args=('Home',)) col2.button('💎 Collections', key='Collections', on_click=self.navigation, args=('Collections',)) st.markdown('<br><br> ', unsafe_allow_html=True) link = 'https://cryptoslam.io'+st.session_state.link img = self.get_collections_seles(link, st.session_state.name_product) c1, c2, c3, c4 = st.columns(4) c1.caption('The USD value of sales from all marketplaces over the last 24 hour period') c2.caption('The number of owners making a purchase on any marketplace over the last 24 hour period') c3.caption('The number of owners selling a NFT on any marketplace over the last 24 hour period') c4.caption('The number of newly minted NFTs over the past 24 hour period') st.image(img, 
use_column_width=True) st.markdown('<br><br> ', unsafe_allow_html=True) path_file = self.get_summary_seles(link = 'https://cryptoslam.io'+st.session_state.link +'/sales/summary') if path_file is not None: df = pd.read_csv(path_file) df = df.iloc[:, 1:] # drop first column st.table(df) df = df.iloc[:-1] # drop last row st.markdown('<br><br> ', unsafe_allow_html=True) import altair as alt st.markdown('Total Transactions per Month', unsafe_allow_html=True) df['Total Transactions'] = df['Total Transactions'].apply(lambda x: re.sub(',', '', x)) trans = alt.Chart(df).mark_line().encode( x="Month", y="Total Transactions:Q", color = alt.value('#FFD700') ).properties(width=500, height=400) st.altair_chart(trans, use_container_width=True) st.markdown('Sales (USD) per Month', unsafe_allow_html=True) df['Sales (USD)'] = df['Sales (USD)'].apply(lambda x: re.sub('\\$|,', '', x)) sales = alt.Chart(df).mark_line().encode( x="Month", y="Sales (USD):Q", color = alt.value('#FFD700') ).properties(width=500, height=400) st.altair_chart(sales, use_container_width=True) st.markdown('Sales (ETH) per Month', unsafe_allow_html=True) df['Sales (ETH)'] = df['Sales (ETH)'].apply(lambda x: re.sub(',', '', x)) sales = alt.Chart(df).mark_line().encode( x="Month", y="Sales (ETH):Q", color = alt.value('#FFD700') ).properties(width=500, height=400) st.altair_chart(sales, use_container_width=True) st.markdown('Avg Sale (USD) per Month', unsafe_allow_html=True) df['Avg Sale (USD)'] = df['Avg Sale (USD)'].apply(lambda x: re.sub('\\$','',x)) Avg_usd = alt.Chart(df).mark_line().encode( x="Month", y="Avg Sale (USD):Q", color = alt.value('#FFD700') ).properties(width=500, height=400) st.altair_chart(Avg_usd, use_container_width=True) st.markdown('Avg Sale (ETH) per Month', unsafe_allow_html=True) df['Avg Sale (ETH)'] = df['Avg Sale (ETH)'].apply(lambda x: re.sub(',', '', x)) Avg_eth = alt.Chart(df).mark_line().encode( x="Month", y="Avg Sale (ETH):Q", color = alt.value('#FFD700') ).properties(width=500, 
height=400) st.altair_chart(Avg_eth, use_container_width=True) else: st.markdown('<br><br><br>', unsafe_allow_html=True) st.markdown('No Data Found', unsafe_allow_html=True) st.markdown('<br><br><br>', unsafe_allow_html=True) # b = alt.Chart(df).mark_area(opacity=0.6).encode( # x='Month', y='Total Transactions') # c = alt.layer(a, b) def parsing_data(self, text): return re.sub("(\xa0)|(\n)|(\r)|(\")|(\'),", "",text) def image_scraping(self, URLToImage, FileName, targetElement = None, hideElement = None): if targetElement is not None or hideElement is not None: options = GrabzItImageOptions.GrabzItImageOptions() if targetElement is not None: options.targetElement = targetElement if hideElement is not None: options.hideElement = hideElement grabzIt = GrabzItClient.GrabzItClient("MWRiMTVhYTcwM2Y5NDIzODlhNmUwYzdlNmUwYzMyYjY=", "Fj9ePz8/Pz8/Pwk/HT8/cS9ZP1FxYg8/Pz8aGT8fJj8=") grabzIt.URLToImage(URLToImage, options) #path = re.sub("\\\web_scraping", "", os.path.abspath("tables_of_collections")) path = '/home/ubuntu/Ziyad_Apps/nfts_data_scraping/tables_of_collections/' grabzIt.SaveTo(path +FileName+'.png') return path +FileName+'.png' def driver_config(self, executable_path): global driver option = webdriver.ChromeOptions() option.add_argument('--ignore-certificate-errors') option.add_argument('--incognito') option.add_argument('--headless') driver = None try: driver = webdriver.Chrome( executable_path = executable_path, options = option) except: driver = webdriver.Chrome(ChromeDriverManager().install(), options=option) return driver def git_page_source(self, url): driver = self.driver_config("C:/Users/hp/.wdm/drivers/chromedriver/win32/91.0.4472.101/chromedriver.exe") driver.get(url) content = driver.page_source soup = BeautifulSoup(content, features="html.parser") #driver.close() return soup def top_collections(self, url): soup = self.git_page_source(url) gdp = soup.find_all("div", attrs={"class": "sc-kmASHI sc-cvJHqN eeGfwZ cktZhQ rankings-table"}) try: body = 
gdp.find_all("div", recursive=False) except: body = gdp[0].find_all("div", recursive=False) head = body[2] body_rows = body[3:] headings = [] h = head.find("div").find("div") for item in h.findChildren("div"): item = (item.text).rstrip("\n") headings.append(item) #print(headings) all_rows = [] for row_num in range(len(body_rows)): row = [] row_item = body_rows[row_num].findChildren("div", recursive=False) rank = row_item[0] row.append(self.parsing_data(rank.text)) Children = row_item[1].findChildren("div", recursive=False) name = Children[1].find("a", attrs={'class': 'nft-name-link'}) if name is None: name = Children[1].find("span", attrs={'class': 'nft-name-link'}) row.append(self.parsing_data(name.text)) # try: # src = Children[0].find('img')['src'] # except: # src = name.text # row.append(src) crypto = Children[1].find("div", attrs={'class': 'sc-jHVexB epRmzg'}) row.append(self.parsing_data(crypto.text)) Volume = row_item[2].findChildren("div", recursive=False) row.append(self.parsing_data(Volume[0].text)) row.append(self.parsing_data(Volume[1].text)) Traders = row_item[3].findChildren("div", recursive=False) row.append(self.parsing_data(Traders[0].text)) row.append(self.parsing_data(Traders[1].text)) Sales = row_item[4].findChildren("div", recursive=False) row.append(self.parsing_data(Sales[0].text)) row.append(self.parsing_data(Sales[1].text)) all_rows.append(row) #print(all_rows) headings.insert(0, 'RANK') #headings.insert(2, 'ICONE') headings.insert(3, 'PROTOCOLS') headings.insert(5, 'VOLUME CHANGE') headings.insert(7, 'TRADERS CHANGE') headings.insert(9, 'SALES CHANGE') df = pd.DataFrame(data=all_rows, columns=headings) return df # df.to_csv(r'result.csv', index=False) # print(df) def get_collections(self, class_name, time): response = request.urlopen("https://cryptoslam.io/") page_source = response.read() soup = BeautifulSoup(page_source, 'html.parser') gdp = soup.find_all("table", attrs={"class": class_name}) table1 = gdp[0] body = table1.find_all("tr") 
head = body[0] body_rows = body[1:] all_rows = [] for row_num in range(len(body_rows)): row = [] for row_item in body_rows[row_num].find_all("td"): aa = re.sub("(\xa0)|(\n)|(\r)|,", "", row_item.text) try: row_item = row_item.find("span", attrs={'class': re.compile("product-name")}) aa = re.sub("(\xa0)|(\n)|(\r)|,", "", row_item.text) except: pass row.append(aa) for row_item in body_rows[row_num].find_all("td"): # loop through all row entries try: img = row_item.img['src'] row.append(img) except: pass try: color = body_rows[row_num].find('td', attrs={'class': 'summary-sales-table__column summary-sales-table__column-change ' 'summary-sales-table__no-wrap cursor-default'}).find('span') color = color['style'][7:14] row.append(color) except: row.append("no color") try: product_link = body_rows[row_num].find("td", attrs={'class': 'summary-sales-table__column summary-sales-table__column-product summary-sales-table__cell-product product'}) product_link = product_link.find('a') product_link = product_link['href'] row.append(product_link) except: row.append("no link") all_rows.append(row) headings = ['RANK','PRODUCT','None','SALES','Change ('+time+')','BUYERS','TRANSACTIONS','ICONS','PROTOCOLE','COLOR', 'LINK'] if time == 'all time': headings = ['RANK', 'PRODUCT', 'None', 'SALES', 'BUYERS', 'TRANSACTIONS', 'OWNERS', 'ICONS', 'PROTOCOLE', 'COLOR', 'LINK'] df = pd.DataFrame(data=all_rows, columns=headings) del df['None'] return df # df.to_csv(r'hjhugy.csv', index=False) # print(df) def get_collections_seles(self, link, filename): options = GrabzItImageOptions.GrabzItImageOptions() # options.targetElement = 'div.statistics-row__stat:nth-child(1) div:nth-child(1) div:nth-child(2) a:nth-child(1) div:nth-child(2) div:nth-child(2)' options.waitForElement = 'div.statistics-row__stat:nth-child(1) div:nth-child(1) div:nth-child(2) a:nth-child(1) div:nth-child(2) div:nth-child(2) canvas:nth-child(1)' options.targetElement = 'div.statistics-row' options.hideElement = 
'div.ibox-content:nth-child(5) , .fa, .fab, .fal, .far, .fas' options.delay=1000 grabzIt = GrabzItClient.GrabzItClient("MWRiMTVhYTcwM2Y5NDIzODlhNmUwYzdlNmUwYzMyYjY=","Fj9ePz8/Pz8/Pwk/HT8/cS9ZP1FxYg8/Pz8aGT8fJj8=") grabzIt.URLToImage(link, options) #path = re.sub("\\\web_scraping", "", os.path.abspath("tables_of_collections")) #grabzIt.SaveTo(path + "\\" + filename + ".png") path = '/home/ubuntu/Ziyad_Apps/nfts_data_scraping/tables_of_collections/' grabzIt.SaveTo(path +filename+".png") return path +filename+".png" def get_summary_seles(self, link): grabzIt = GrabzItClient.GrabzItClient("MWRiMTVhYTcwM2Y5NDIzODlhNmUwYzdlNmUwYzMyYjY=", "Fj9ePz8/Pz8/Pwk/HT8/cS9ZP1FxYg8/Pz8aGT8fJj8=") options = GrabzItTableOptions.GrabzItTableOptions() options.tableNumberToInclude = 1 grabzIt.URLToTable(link, options) # Then call the Save or SaveTo method #path = re.sub("\\\web_scraping", "", os.path.abspath("tables_of_collections")) #grabzIt.SaveTo(path + "\\" + "summary.csv") path = '/home/ubuntu/Ziyad_Apps/nfts_data_scraping/tables_of_collections/' try : grabzIt.SaveTo(path +"summary.csv.png") return path +"summary.csv.png" except: return None # # options = webdriver.ChromeOptions() # options.add_argument('--ignore-certificate-errors') # options.add_argument('--incognito') # options.add_argument('--headless') # #driver = webdriver.Chrome(ChromeDriverManager().install(),options=option) # driver = webdriver.Chrome(executable_path="C:/Users/hp/.wdm/drivers/chromedriver/win32/91.0.4472.101/chromedriver.exe",options=options) # url = "https://dappradar.com/nft/collections" # driver.get(url) # # # # content = driver.page_source # soup = BeautifulSoup(content, features="html.parser") # driver.close() # # gdp = soup.find_all("div", attrs={"class": "sc-kmASHI sc-cvJHqN eeGfwZ cktZhQ rankings-table"}) # try : # body = gdp.find_all("div", recursive=False) # except: # body = gdp[0].find_all("div", recursive=False) # # head = body[2] # body_rows = body[3:] # # headings = [] # h = 
head.find("div").find("div") # for item in h.findChildren("div"): # item = (item.text).rstrip("\n") # headings.append(item) # print(headings) # # # all_rows = [] # for row_num in range(len(body_rows)): # row = [] # # row_item = body_rows[row_num].findChildren("div", recursive=False) # # rank = row_item[0] # row.append(parsing_data(rank.text)) # # Children = row_item[1].findChildren("div", recursive=False) # # name = Children[1].find("a", attrs={'class': 'nft-name-link'}) # if name is None: # name = Children[1].find("span", attrs={'class': 'nft-name-link'}) # row.append(parsing_data(name.text)) # try : # src = Children[0].find('img')['src'] # except: # src = name.text # row.append(src) # # crypto = Children[1].find("div", attrs={'class': 'sc-jHVexB epRmzg'}) # row.append(parsing_data(crypto.text)) # # Volume = row_item[2].findChildren("div", recursive=False) # row.append(parsing_data(Volume[0].text)) # row.append(parsing_data(Volume[1].text)) # # Traders = row_item[3].findChildren("div", recursive=False) # row.append(parsing_data(Traders[0].text)) # row.append(parsing_data(Traders[1].text)) # # Sales = row_item[4].findChildren("div", recursive=False) # row.append(parsing_data(Sales[0].text)) # row.append(parsing_data(Sales[1].text)) # # all_rows.append(row) # # print(all_rows) # # headings.insert(0,'rank') # headings.insert(2,'icone') # headings.insert(3,'crypto-chaine') # headings.insert(5,'volume-change') # headings.insert(7,'traders-change') # headings.insert(9,'sales-change') # # df = pd.DataFrame(data=all_rows, columns=headings) # df.to_csv(r'result.csv', index = False) # print(df) #------------------------------------------ #------------------------------------------ #------------------------------------------ # #------------------------- test # all_rows = [] # for row_num in range(len(body_rows)): # row = [] # for row_item in body_rows[row_num].findChildren("div", recursive=False): # children = row_item.findChildren("div", recursive=False) # if 
len(children)==0: # row.append(row_item.text) # else: # for row_item_2 in children: # name = row_item_2.find("a", attrs={'class': 'nft-name-link'}) # crypto = row_item_2.find("div", attrs={'class': 'sc-jHVexB epRmzg'}) # prjt_img = row_item_2.find("img", attrs={'class': 'sc-dwfUOf bNQBLt img-loaded'}) # if name is not None and crypto is not None: # row.append(name.text) # row.append(crypto.text) # elif prjt_img is not None: # row.append(prjt_img['src']) # else: # row.append(row_item_2.text) # # all_rows.append(row) # #print(all_rows) # headings.insert(0,'rank') # headings.insert(1,'icone') # headings.insert(3,'crypto-chaine') # headings.insert(5,'volume-change') # headings.insert(7,'traders-change') # headings.insert(9,'sales-change') # # df = pd.DataFrame(data=all_rows, columns=headings) # df.to_csv(r'result.csv', index = False) # print(df.head(10)) # def display_profile(username): # def get_collection(username): # c = twint.Config() # c.Username = username # c.Hide_output = True # twint.run.Lookup(c) # collection = twint.output.users_list[-1] # return collection # # try: # # get the profile_pic_url # collection = get_collection(username) # prof_pic = collection.avatar.replace("normal", "100x100") # # download the image in a folder called static I created # response = requests.get(prof_pic) # filename = "collections/username.jpg" # with open(filename, "wb") as f: # f.write(response.content) # # return filename # except: # return None
{"/main_scrape.py": ["/web_scraping/dappRadar_alltable.py"]}
57,846
klgentle/lc_python
refs/heads/master
/leet_code/A_LeetCode_Grinding_Guide/Greedy/P0135_Candy.py
""" 135. Candy Hard There are N children standing in a line. Each child is assigned a rating value. You are giving candies to these children subjected to the following requirements: Each child must have at least one candy. Children with a higher rating get more candies than their neighbors. What is the minimum candies you must give? Example 1: Input: [1,0,2] Output: 5 Explanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively. Example 2: Input: [1,2,2] Output: 4 Explanation: You can allocate to the first, second and third child with 1, 2, 1 candies respectively. The third child gets 1 candy because it satisfies the above two conditions. """ class Solution: def candy(self, ratings: List[int]) -> int: size = len(ratings) if size < 2: return size candies = [1] * size for i in range(1, size): if ratings[i-1] < ratings[i]: candies[i] = candies[i-1] +1 #print(candies) for i in range(size-1, 0, -1): if ratings[i] < ratings[i-1]: # 必须使用Max,不然会降低candies[i-1]的值 candies[i-1] = max(candies[i-1], candies[i] +1) #print(candies) return sum(candies)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,847
klgentle/lc_python
refs/heads/master
/frequency_model/setVolume.py
# coding=utf-8 import win32api WM_APPCOMMAND = 0x319 APPCOMMAND_VOLUME_MAX = 0x0A APPCOMMAND_VOLUME_MIN = 0x09 def setVolume(volume: int): # 按公式处理音量数值 # l = int(APPCOMMAND_VOLUME_MAX) - int(APPCOMMAND_VOLUME_MIN) volume = volume / 100 mult = int(volume*0x10000) win32api.SendMessage(-1, WM_APPCOMMAND, 0x30292, APPCOMMAND_VOLUME_MAX * hex(mult)) if __name__ == "__main__": # 中等音量 setVolume(15) # 静音 # SetVolume(0)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,848
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/array/p0556_Next_Greater_Element_III.py
""" 556. Next Greater Element III Medium Given a positive integer n, find the smallest integer which has exactly the same digits existing in the integer n and is greater in value than n. If no such positive integer exists, return -1. Note that the returned integer should fit in 32-bit integer, if there is a valid answer but it does not fit in 32-bit integer, return -1. Example 1: Input: n = 12 Output: 21 Example 2: Input: n = 21 Output: -1 """ from itertools import permutations class Solution: def nextGreaterElement(self, n: int) -> int: ms = list(str(n)) res = 2**31 for m in permutations(ms, len(ms)): v = int("".join(m)) #print(v) if v > n and v <= 2**31 - 1: res = min(res, v) if res != 2**31: return res else: return -1
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,849
klgentle/lc_python
refs/heads/master
/base_practice/list_test.py
from timeit import Timer def test_list_append(): li = [] for i in range(10000): li.append(i) return li def test_list_add(): li = [] for i in range(10000): li += [i] return li def test_list_derivation(): return [i for i in range(10000)] def test_list_function(): return list(range(10000)) timer1 = Timer("test_list_append()", "from __main__ import test_list_append") print("test_list_append:", timer1.timeit(1000)) timer1 = Timer("test_list_add()", "from __main__ import test_list_add") print("test_list_add:", timer1.timeit(1000)) timer1 = Timer("test_list_derivation()", "from __main__ import test_list_derivation") print("test_list_derivation:", timer1.timeit(1000)) timer1 = Timer("test_list_function()", "from __main__ import test_list_function") print("test_list_function:", timer1.timeit(1000))
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,850
klgentle/lc_python
refs/heads/master
/automatic_office/decryptZipFile.py
import os

import py7zr


def decryptZipFile(filename):
    """Brute-force the password of a 7z archive.

    Reads candidate passwords (one per line) from pwd_guss.txt in the
    current directory and tries each until extraction succeeds.

    Args:
        filename: path of the .7z archive to crack.
    """
    print("current dir is: ", os.getcwd())
    # BUG FIX: the f-string had lost its placeholder and printed a
    # literal "(unknown)" instead of the file name.
    print(f"filename is: {filename}")
    cracked = False
    with open('pwd_guss.txt', "r", encoding="utf8") as fpPwd:
        for pwd in fpPwd:
            pwd = pwd.rstrip()
            print(f'Try password {pwd} ...')
            try:
                # Context manager closes the archive handle (the original
                # leaked it on success).
                with py7zr.SevenZipFile(filename, password=pwd) as qz:
                    qz.extractall()
                print("破解成功,密码是" + pwd)
                cracked = True
                break
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Wrong passwords surface
                # as py7zr/lzma errors, all subclasses of Exception.
                pass
    if not cracked:
        print("破解失败")


if __name__ == '__main__':
    os.chdir(r"D:\code_test\game_test\confidential_data")
    decryptZipFile("personal.7z")
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,851
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/validPalindrome.py
# coding:utf8


class Solution:
    def validPalindrome(self, s: str) -> bool:
        """Return True if s can become a palindrome by deleting at most one char.

        LeetCode 680. Compares s with its reverse; at the first mismatch,
        tries skipping the offending character on either side.
        Fixes: removed the unused third-party `pysnooper` import (broke
        environments without it) and added the final `return True` so the
        function always returns a bool instead of falling off the end.
        """
        r = s[::-1]
        if r == s:
            return True
        for i in range(len(s)):
            if s[i] != r[i]:
                # Skip position i in s, or the mirrored position in r.
                m = s[:i] + s[i + 1:]
                n = r[:i] + r[i + 1:]
                return m == m[::-1] or n == n[::-1]
        return True


if __name__ == "__main__":
    a = Solution()
    s = "cbbcc"
    a.validPalindrome(s)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,852
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/dynamic_programming/P0322_Coin_Change.py
""" 322. Coin Change TODO enhance Medium You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1. You may assume that you have an infinite number of each kind of coin. Example 1: Input: coins = [1,2,5], amount = 11 Output: 3 Explanation: 11 = 5 + 5 + 1 Example 2: Input: coins = [2], amount = 3 Output: -1 """ class Solution: def coinChange(self, coins, amount: int) -> int: # coins:List[int] res = {} res[0] = 0 def dp(n): # dp(n) = min(dp(n-coin)+1, dp(n)) if n in res.keys(): return res[n] if n < 0: return -1 res[n] = float("INF") for coin in coins: if dp(n - coin) == -1: continue res[n] = min(dp(n - coin) + 1, res[n]) return res[n] if res[n] != float("INF") else -1 return dp(amount)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,853
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/sumRange.py
class NumArray:
    """Prefix-sum structure for O(1) range-sum queries (LeetCode 303)."""

    def __init__(self, nums: list):
        # self.sum[k] holds the sum of nums[0:k]; self.sum[0] is 0.
        self.sum = [0]
        running = 0
        for n in nums:
            running += n
            self.sum.append(running)

    def sumRange(self, i: int, j: int) -> int:
        """Return the sum of nums[i..j] inclusive.

        Fix: removed four leftover debug print() calls that spammed
        stdout on every query.
        """
        return self.sum[j + 1] - self.sum[i]


if __name__ == "__main__":
    nums = [-2, 0, 3, -5, 2, -1]
    a = NumArray(nums)
    a.sumRange(3, 5)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,854
klgentle/lc_python
refs/heads/master
/database/change_files_encode.py
import chardet
import codecs
import logging
import sys
import os
import encodings.idna

logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")


def convert_file(filename, out_enc):
    """Re-encode a single file in place to out_enc.

    !!! does not back up the original file.
    Detects the source encoding with chardet; skips the file when
    detection fails or it already uses out_enc.
    """
    with codecs.open(filename, "rb") as f:
        content = f.read()
    source_encoding = chardet.detect(content)["encoding"]
    if not source_encoding:
        # BUG FIX: logging.info("source_encoding??", filename) supplied an
        # argument with no %s placeholder — logging reported a formatting
        # error and the filename was never shown.
        logging.info("source_encoding?? %s", filename)
        return
    if source_encoding != out_enc:
        # Lazy %-style args avoid formatting when the level is disabled.
        logging.info("%s,%s", source_encoding, filename)
        content = content.decode(source_encoding, "ignore")
        with codecs.open(filename, "w", encoding=out_enc) as f:
            f.write(content)


def convert_folder(dir, out_enc="GB2312"):
    """Recursively convert every file under dir to out_enc."""
    for root, dirs, files in os.walk(dir):
        for file in files:
            print(file)
            path = os.path.join(root, file)
            convert_file(path, out_enc)


if __name__ == "__main__":
    # usage: python3 change_files_encode.py dir_name gb2312
    convert_folder(sys.argv[1], sys.argv[2])
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,855
klgentle/lc_python
refs/heads/master
/tests/testCreateWeekReport.py
import os
import sys
from os.path import dirname
import datetime
import pprint
import unittest
import docx

# Project root directory (the parent of this tests/ directory).
BASE_DIR = dirname(dirname(os.path.abspath(__file__)))
# Add the project root to sys.path so the package import below resolves.
sys.path.append(BASE_DIR)

from automatic_office.CreateWeekReport import CreateWeekReport


class testCreateWeekReport(unittest.TestCase):
    """Tests for CreateWeekReport's copy/rename/update of weekly-report files.

    NOTE(review): the numeric prefixes in the method names matter —
    unittest runs tests in alphabetical order, and the later check_*
    tests presumably depend on files created by the earlier copy tests;
    confirm before renaming.
    """

    def setUp(self):
        print("setUp...")
        # Report for week "202003"; the template file names contain the
        # placeholder "fromEndStr", replaced here with a date range.
        self.report = CreateWeekReport("202003")
        self.from_word = self.report.from_word
        self.from_excel = self.report.from_excel
        self.target_word = self.from_word.replace("fromEndStr", "20200305-20200306")
        self.target_excel = self.from_excel.replace("fromEndStr", "20200305-20200306")
        self.from_dir = self.report.get_from_dir()
        self.date_tuple = (datetime.date(2020, 3, 5), datetime.date(2020, 3, 6))

    def test_1_copy_word_file(self):
        # Copy the Word template and check that the dated copy exists.
        self.report.copy_file(self.from_word, self.target_word)
        assert os.path.exists(os.path.join(self.from_dir, self.target_word)) is True

    def test_2_replace_date_str(self):
        # Pure string substitution of the date placeholder.
        content = "lslldf from_end_str"
        assert (
            self.report.replace_date_str(content, "from_end_str", "20200305-20200306")
            == "lslldf 20200305-20200306"
        )

    def test_4_copy_excel(self):
        # Copy the Excel template and check that the dated copy exists.
        self.report.copy_file(self.from_excel, self.target_excel)
        assert os.path.exists(os.path.join(self.from_dir, self.target_excel)) is True

    def test_5_check_and_change(self):
        # Smoke test: runs the Word date update; passes if no exception.
        self.report.check_word_change(self.target_word, self.date_tuple)

    def test_9_check_excel_change(self):
        # Smoke test: runs the Excel date update; passes if no exception.
        self.report.check_excel_change(self.target_excel, self.date_tuple)

    def tearDown(self):
        print("tearDown...")


if __name__ == "__main__":
    unittest.main()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,856
klgentle/lc_python
refs/heads/master
/vs_code/docxAddPicture.py
from PIL import Image
import os
import cv2
from docx import Document


class SplitPicture(object):
    """Collects the PNG files under a folder tree into per-folder .docx files.

    Despite the name, the cropping/"split" step is currently commented
    out; only the add-picture-to-document part runs.
    """

    def __init__(self):
        # Hard-coded source directory; also made the working directory so
        # the generated .docx files land inside it.
        self.path = "/mnt/c/Users/klgentle/Desktop/老婆专用"
        os.chdir(self.path)

    def splitPicture(self):
        """Walk self.path and save one .docx per folder containing its PNGs."""
        # for filename in os.listdir(self.path):
        for folderName, subfolders, filenames in os.walk(self.path):
            document = Document()
            for filename in filenames:
                # NOTE(review): os.walk yields filenames as plain strings,
                # so this list check looks unreachable — confirm before
                # removing.
                if isinstance(filename, list):
                    print(f"filename:(unknown) list ")
                    continue
                if not filename.endswith("png"):
                    continue
                # if filename in ('2019-05-13 163910.png','2019-05-13 163915.png'):
                #     continue
                print(f"filename:(unknown)")
                file_whole_name = os.path.join(self.path, folderName, filename)
                # Earlier crop/resize experiment, kept for reference:
                #img = cv2.imread(file_whole_name)
                #cropped = img[85:660, :]
                #x, y = cropped.shape[0:2]
                #img_test1 = cv2.resize(cropped, (int(y / 2.2), int(x / 2.2)))
                #cv2.imwrite(file_whole_name, img_test1)
                # doc add picture
                document.add_picture(file_whole_name)
            # One output document per walked folder, named after it.
            document.save(folderName + '.docx')
        #def docxAddPicture(self):


if __name__ == "__main__":
    a = SplitPicture()
    a.splitPicture()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,857
klgentle/lc_python
refs/heads/master
/leet_code/classic_topic/0053_Maximum_Subarray.py
class Solution:
    def maxSubArray(self, nums: list) -> int:
        """Return the largest sum of any contiguous subarray (LeetCode 53).

        Kadane's algorithm done in place: after the loop, nums[i] holds
        the best subarray sum ending at index i. Note this mutates nums.

        BUG FIX: the parameter was annotated List[int] without importing
        typing.List, which raises NameError the moment the ``def`` is
        executed; the builtin ``list`` annotation is self-contained.
        """
        for i in range(1, len(nums)):
            nums[i] += max(nums[i - 1], 0)
        return max(nums)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,858
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/titleToNumber.py
class Solution:
    def titleToNumber(self, s: str) -> int:
        """Convert an Excel column title to its number (LeetCode 171).

        Treats the title as a base-26 numeral with digits A=1 .. Z=26,
        e.g. "A" -> 1, "AA" -> 27, "ZY" -> 701.
        Fix: removed the per-call debug print and the manual index loop
        with its special-cased last iteration.
        """
        ret = 0
        for ch in s:
            ret = ret * 26 + (ord(ch) - ord("A") + 1)
        return ret


if __name__ == "__main__":
    c = Solution()
    c.titleToNumber("A")
    c.titleToNumber("AA")
    c.titleToNumber("ZY")
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,859
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/tree/p0222_Count_Complete_Tree_Nodes.py
""" Given the root of a complete binary tree, return the number of the nodes in the tree. According to Wikipedia, every level, except possibly the last, is completely filled in a complete binary tree, and all nodes in the last level are as far left as possible. It can have between 1 and 2h nodes inclusive at the last level h. Example 1: Input: root = [1,2,3,4,5,6] Output: 6 Example 2: Input: root = [] Output: 0 Example 3: Input: root = [1] Output: 1 """ # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def countNodes1(self, root: TreeNode) -> int: if not root: return 0 return 1 + self.countNodes(root.left) + self.countNodes(root.right) def countNodes(self, root: TreeNode) -> int: if not root: return 0 l = root.left r = root.right hl, hr = 0, 0 while l: l = l.left hl += 1 while r: r = r.right hr += 1 if hl == hr: return 2 ** (hl + 1) - 1 return 1 + self.countNodes(root.left) + self.countNodes(root.right)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,860
klgentle/lc_python
refs/heads/master
/tests/re_test.py
import unittest
import re
import os
import sys
from os.path import dirname

# Project root directory.
BASE_DIR = dirname(dirname(os.path.abspath(__file__)))
# Add the project root to sys.path so the database package imports.
sys.path.append(BASE_DIR)

from database.Procedure import Procedure


class ReTest(unittest.TestCase):
    """Scratch tests for the regexes used by the SQL-procedure rewriting tools."""

    def test_normalize_report_log_insert(self):
        """
        Normalize an "INSERT INTO BAT_REPORT_LOG" line's whitespace to the
        canonical "INSERT INTO BAT_REPORT_LOG" form using a regular
        expression.
        """
        line = "INSERT INTO BAT_REPORT_LOG (DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)"
        # modify here
        batch_insert_pattern = r"\s*INSERT\s+INTO\s+BAT_REPORT_LOG(.*)"
        if re.search(batch_insert_pattern, line, flags=re.IGNORECASE):
            # logging.debug('deal_with_report_log_blanks')
            # Group 1 preserves the column list that follows the keyword.
            line = re.sub(
                batch_insert_pattern,
                r"INSERT INTO BAT_REPORT_LOG\1",
                line,
                flags=re.IGNORECASE,
            )
            print(line)
        print("test_normalize_report_log_insert end --------------------")

    # def test_re_search1(self):
    #     print('--------------------')
    #     pattern = r'(from|join)\s+(v|rpt)_'
    #     string_list = ['from v_', '\njoin v_...', 'from rpt_',
    #                    'join rpt_', 'FROM V_', 'JOIN V_', 'FROM RPT_', 'JOIN RPT_']
    #     for string in string_list:
    #         search_obj = re.search(pattern, string, flags=re.IGNORECASE)
    #         if search_obj:
    #             # add RPTUSER
    #             print(re.sub(pattern, r'\1 RPTUSER.\2_',
    #                          string, flags=re.IGNORECASE))
    #     print('--------------------')

    def test_re_search(self):
        """Check the case-insensitive matcher for BAT_REPORT_LOG inserts."""
        batch_insert_pattern = r"\s*INSERT\s+INTO\s+BAT_REPORT_LOG"
        test_input = [
            "INSERT INTO BAT_REPORT_LOG",
            " INSERT INTO BAT_REPORT_LOG",
            "insert into bat_report_log",
        ]
        # self.assertEqual(re_batch_insert.search(test_input[0]), True)
        for string in test_input:
            if re.search(batch_insert_pattern, string, flags=re.IGNORECASE):
                print(
                    re.sub(
                        batch_insert_pattern,
                        "INSERT INTO BAT_REPORT_LOG",
                        string,
                        flags=re.IGNORECASE,
                    )
                )
                # # print(re.sub('class', 'function', 'Class object', flags=re.IGNORECASE))
            else:
                print(string)

    # def test_re_findall(self):
    #     procedure = Procedure('p_rpt_cif032')
    #     proc_cont = procedure.read_proc_cont()
    #     pattern = r"V_JOB_STEP\s*:=\s*\d+;"
    #     job_step_value_list = re.findall(pattern,proc_cont)
    #     print('----------------')
    #     print(job_step_value_list)
    #     pattern.replace()

    def test_str_replace(self):
        """str.replace with count=1 replaces only the first occurrence."""
        a = "abc, abc, bcd, bcd, abc"
        a = a.replace("abc", "def", 1)
        print(a)


if __name__ == "__main__":
    unittest.main()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,861
klgentle/lc_python
refs/heads/master
/vs_code/send_mail.py
import smtplib


def send_mail():
    """Send a one-off test mail through smtp.sina.com over implicit SSL.

    Prompts for the sender's password on stdin.
    Returns 0 on success, -1 on any SMTP-stage failure.
    Fixes: removed the unused third-party pysnooper import, and removed
    the starttls() call — the connection is already TLS-encrypted by
    SMTP_SSL, and issuing STARTTLS on an implicit-SSL session is rejected
    by the server (smtplib.SMTPNotSupportedError).
    """
    from_email = "klgentle@sina.com"
    to_email = 'jian.dong2@pactera.com'
    smtpObj = smtplib.SMTP_SSL('smtp.sina.com', 465)
    ret = smtpObj.ehlo()
    if ret[0] != 250:
        print("Smtp say hello failed!")
        return -1
    password = input(f"Please input your passwd of {from_email}: ")
    ret = smtpObj.login(from_email, password)
    if ret[0] != 235:
        print("Smtp login failed!")
        return -1
    # The message must start with 'Subject: ...\n'; the newline separates
    # the subject line from the mail body.
    ret = smtpObj.sendmail(from_email, to_email, 'Subject: SMTP mail test, long time no see.\nThanks for all your help, sincerely, klgentle')
    if ret != {}:
        print("Smtp send mail failed!")
        return -1
    smtpObj.quit()
    print("mail send success!")
    return 0


if __name__ == '__main__':
    send_mail()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,862
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/maxSubArray_ans.py
class Solution: """ 53. 最大子序和 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。 Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum. """ def maxSubArray(self, nums: list) -> int: for i in range(1, len(nums)): nums[i] += max(0, nums[i - 1]) return max(nums)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,863
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/tree/P0654_Maximum_Binary_Tree.py
""" You are given an integer array nums with no duplicates. A maximum binary tree can be built recursively from nums using the following algorithm: Create a root node whose value is the maximum value in nums. Recursively build the left subtree on the subarray prefix to the left of the maximum value. Recursively build the right subtree on the subarray suffix to the right of the maximum value. Return the maximum binary tree built from nums. """ # Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode: if not nums: return max_value = max(nums) max_index = nums.index(max_value) root = TreeNode(max_value) root.left = self.constructMaximumBinaryTree(nums[:max_index]) # be careful max_index is in the root node root.right = self.constructMaximumBinaryTree(nums[max_index + 1 :]) return root
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,864
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/tree/p0653_Two_Sum_IV_-_Input_is_a_BST.py
""" 653. Two Sum IV - Input is a BST Easy Given the root of a Binary Search Tree and a target number k, return true if there exist two elements in the BST such that their sum is equal to the given target. Example 1: Input: root = [5,3,6,2,4,null,7], k = 9 Output: true Example 2: Input: root = [5,3,6,2,4,null,7], k = 28 Output: false Example 3: Input: root = [2,1,3], k = 4 Output: true Example 4: Input: root = [2,1,3], k = 1 Output: false Example 5: Input: root = [2,1,3], k = 3 Output: true Constraints: The number of nodes in the tree is in the range [1, 104]. -104 <= Node.val <= 104 root is guaranteed to be a valid binary search tree. -105 <= k <= 105 """ class Solution: def __init__(self): self.cache = {} def findTarget(self, root: TreeNode, k: int) -> bool: if not root: return t = k - root.val # print(f"self.cache:{self.cache}") if self.cache.get(t, 0): return True self.cache[root.val] = 1 return self.findTarget(root.left, k) or self.findTarget(root.right, k)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,865
klgentle/lc_python
refs/heads/master
/data_structure/Countable.py
class Countable:
    """Tracks how many Countable instances have ever been created."""

    counter = 0  # class-wide running count of constructed instances

    def __init__(self):
        # Bump the shared class attribute, never an instance attribute.
        Countable.counter += 1

    @classmethod
    def get_count(cls):
        """Return the total number of Countable instances created so far."""
        return Countable.counter


x = Countable()
y = Countable()
z = Countable()
print(Countable.get_count())
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,866
klgentle/lc_python
refs/heads/master
/database/data_base_connect.py
import cx_Oracle def connect(da_name="171"): conn = cx_Oracle.connect('rptuser', 'rptuser','100.11.94.176:1521/odsdb') return conn
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,867
klgentle/lc_python
refs/heads/master
/svn_operate/config_default.py
configs = {"path": { "svn_home_path": "/mnt/e", "svn_procedure_path": "/mnt/e/svn/1300_编码/1301_ODSDB/RPTUSER/05Procedures", "win_svn_procedure_path": r"E:\svn\1300_编码\1301_ODSDB\RPTUSER\05Procedures", "win_svn_home_path": "E:\\"}}
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,868
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/backtracking/p0040_Combination_Sum_II.py
40. Combination Sum II """ Medium Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target. Each number in candidates may only be used once in the combination. Note: The solution set must not contain duplicate combinations. Example 1: Input: candidates = [10,1,2,7,6,1,5], target = 8 Output: [ [1,1,6], [1,2,5], [1,7], [2,6] ] Example 2: Input: candidates = [2,5,2,1,2], target = 5 Output: [ [1,2,2], [5] ] """ class Solution: def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]: if min(candidates) > target: return [] if sum(candidates) < target: return [] self.res = [] self.backtrace(sorted(candidates), target, [], 0) return self.res def backtrace(self, cands, target, path, index): if target < 0: return if target == 0: self.res.append(path) return for i in range(index,len(cands)): # pass duplicate value if i > index and cands[i] == cands[i-1]: continue if cands[i] > target: break # i + 1: number only be used once self.backtrace(cands, target - cands[i], path+[cands[i]], i+1)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,869
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/backtracking/p0022_Generate_Parentheses.py
""" 22. Generate Parentheses Medium Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses. Example 1: Input: n = 3 Output: ["((()))","(()())","(())()","()(())","()()()"] Example 2: Input: n = 1 Output: ["()"] """ class Solution: # 编程要非常注意细节 def generateParenthesis(self, n: int) -> List[str]: self.res = [] self.n = n self.backtrace([], 0, 0) return self.res def backtrace(self, path=[], Open=0, Close=0): if len(path) == 2 * self.n: self.res.append("".join(path)) return if Open < self.n: self.backtrace(path+["("], Open+1, Close) if Close < Open: self.backtrace(path+[")"], Open, Close+1)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,870
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/fib.py
class Solution: def fib(self, N:int)->int: F = [0, 1] while len(F) <= N: F.append(F[-1] + F[-2]) return F[N]
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,871
klgentle/lc_python
refs/heads/master
/database/FindViewOriginalTable.py
import os import json import sys import logging logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s") class FindViewOriginalTable(object): def __init__(self): # linux # self.view_path = "/home/kl/svn/1300_编码/1301_ODSDB/RPTUSER/03Views" # self.new_path = "/mnt/e/yx_walk/report_develop/new_views" # win self.view_path = r"E:\svn\1300_编码\1301_ODSDB\RPTUSER\03Views" self.new_path = r"E:\yx_walk\report_develop\new_views" # self.new_dict_file_name = "view_original_dict.txt" self.new_dict_file_name = r"view_original_dict.txt" self.view_original_dict = {} def read_view_original_table(self, file_name: str): view_file = os.path.join(self.view_path, file_name) view_content = "" try: with open(view_file) as f: view_content = f.read().strip().upper() # return original table name, such as ODSUSER.CBS_FH00_TU_CUS_DETAILS_M # TODO deal with special data such as " from ods" original_table = view_content.split("FROM ")[1].replace(";", "") return original_table.split("UNION")[0].strip() except: return -1 def read_all_view_original_table(self): for file_name in os.listdir(self.view_path): if not file_name.lower().startswith("v_"): continue # delete .sql for view_name view_name = file_name[:-4].upper() self.view_original_dict[view_name] = self.read_view_original_table( file_name ) return self.view_original_dict def save_original_table_dict(self): # wirte file new_view = os.path.join(self.new_path, self.new_dict_file_name) with open(new_view, "w", encoding="utf-8") as new_f: new_f.write(json.dumps(self.read_all_view_original_table())) def get_view_original_table(self, view_name: str): if view_name.upper().startswith("RPTUSER"): view_name = view_name[8:] # view_dict_file = os.path.join(self.new_path, self.new_dict_file_name) if not os.path.exists(self.new_dict_file_name): self.new_dict_file_name = os.path.join("database", self.new_dict_file_name) with open(self.new_dict_file_name) as f: view_dict = json.loads(f.read()) logging.debug("view_name: %s" % view_name) 
return view_dict.get(view_name.upper().strip()) def create_view_to_table_sql(self): view_dict_file = os.path.join(self.new_path, self.new_dict_file_name) with open(view_dict_file) as f: view_dict = json.loads(f.read()) view_sql = os.path.join(self.new_path, "view_sql.sql") with open(view_sql, "w", encoding="utf-8") as new_f: for view_name, table_name in view_dict.items(): # V_ACCOUNT_ALL view_short_name = view_name[2:-4] sql = f"insert into view_to_table (view_name, table_name, view_short_name) values('{view_name}','{table_name.strip()}','{view_short_name}'); " # sql_list.append(sql) new_f.write(sql + "\n") if __name__ == "__main__": # use for search table if len(sys.argv) <= 1: print("Please input view_name") sys.exit(1) a = FindViewOriginalTable() # use for search table print(a.get_view_original_table(sys.argv[1])) # save_original_table_dict in local file # a.save_original_table_dict() # original_table = a.read_view_original_table('v_limit_all.sql') # print(original_table) # a.create_view_to_table_sql()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,872
klgentle/lc_python
refs/heads/master
/vs_code/test.py
from PIL import Image import pytesseract #上面都是导包,只需要下面这一行就能实现图片文字识别 file_name ="/mnt/c/Users/klgentle/Desktop/老婆专用/2019版药学(初级西药师)考前押题[专业代码:201]/押题密卷一/专业知识/2019-05-13 223410.png" text = pytesseract.image_to_string(Image.open(file_name),lang='chi_sim') print(text)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,873
klgentle/lc_python
refs/heads/master
/quantitative_trading/quan_test.py
#先引入后面可能用到的包(package) import pandas as pd from datetime import datetime import backtrader as bt import matplotlib.pyplot as plt #%matplotlib inline #在jupyter notebook上运行 #正常显示画图时出现的中文和负号 from pylab import mpl mpl.rcParams['font.sans-serif']=['SimHei'] class my_strategy1(bt.Strategy): #全局设定交易策略的参数 params=( ('maperiod',20), ) def __init__(self): #指定价格序列 self.dataclose=self.datas[0].close # 初始化交易指令、买卖价格和手续费 self.order = None self.buyprice = None self.buycomm = None #添加移动均线指标,内置了talib模块 self.sma = bt.indicators.SimpleMovingAverage( self.datas[0], period=self.params.maperiod) def next(self): if self.order: # 检查是否有指令等待执行, return # 检查是否持仓 if not self.position: # 没有持仓 #执行买入条件判断:收盘价格上涨突破20日均线 if self.dataclose[0] > self.sma[0]: #执行买入 self.order = self.buy(size=500) else: #执行卖出条件判断:收盘价格跌破20日均线 if self.dataclose[0] < self.sma[0]: #执行卖出 self.order = self.sell(size=500) mpl.rcParams['axes.unicode_minus']=False #使用tushare旧版接口获取数据 import tushare as ts def get_data(code,start='2010-01-01',end='2020-03-31'): df=ts.get_k_data(code,autype='qfq',start=start,end=end) df.index=pd.to_datetime(df.date) df['openinterest']=0 df=df[['open','high','low','close','volume','openinterest']] return df dataframe=get_data('600000') #回测期间 start=datetime(2010, 3, 31) end=datetime(2020, 3, 31) # 加载数据 data = bt.feeds.PandasData(dataname=dataframe,fromdate=start,todate=end) # 初始化cerebro回测系统设置 cerebro = bt.Cerebro() #将数据传入回测系统 cerebro.adddata(data) # 将交易策略加载到回测系统中 cerebro.addstrategy(my_strategy1) # 设置初始资本为10,000 startcash = 10000 cerebro.broker.setcash(startcash) # 设置交易手续费为 0.2% cerebro.broker.setcommission(commission=0.002) d1=start.strftime('%Y%m%d') d2=end.strftime('%Y%m%d') print(f'初始资金: {startcash}\n回测期间:{d1}:{d2}') #运行回测系统 cerebro.run() #获取回测结束后的总资金 portvalue = cerebro.broker.getvalue() pnl = portvalue - startcash #打印结果 print(f'总资金: {round(portvalue,2)}') print(f'净收益: {round(pnl,2)}')
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,874
klgentle/lc_python
refs/heads/master
/leet_code/top_interview_questions/Medium_Case/P0078_Subsets.py
""" 78.Subsets Given an integer array nums, return all possible subsets (the power set). The solution set must not contain duplicate subsets. Example 1: Input: nums = [1,2,3] Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]] Example 2: Input: nums = [0] Output: [[],[0]] """ class Solution: # 利用 list + 运算实现元素的组合 # +类似于list的extend运算 def subsets(self, nums) -> list: res = [[]] for num in nums: res += [item + [num] for item in res] return res
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,875
klgentle/lc_python
refs/heads/master
/tests/test_procedure.py
import unittest import os import sys from os.path import dirname # 项目根目录 BASE_DIR = dirname(dirname(os.path.abspath(__file__))) # 添加系统环境变量 sys.path.append(BASE_DIR) from database.Procedure import Procedure class TestProcedure(unittest.TestCase): def test_deal_with_report_log_blanks(self): a = Procedure('p_rpt_cif035') line1 = " INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)" line2 = " INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)" line3 = "INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)" except_line = "INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)" self.assertEqual(a.deal_with_report_log_blanks(line1), except_line) self.assertEqual(a.deal_with_report_log_blanks(line2), except_line) self.assertEqual(a.deal_with_report_log_blanks(line3), except_line) def test_deal_with_report_log_blanks2(self): a = Procedure('p_rpt_cif035') line1 = "(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE)" self.assertEqual(a.deal_with_report_log_blanks(line1), line1) if __name__ == "__main__": unittest.main()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,876
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/num_partition.py
from collections import Counter class Solution: def hasGroupsSizeX(self, deck: List[int]) -> bool: if len(deck) <= 1 or len(deck) > 26: return False a = Counter(deck).most_common() num_list = [i[1] for i in a] num_list.sort() print("num_list:", num_list) if len(set(num_list)) == 1 or num_list in ( [2, 4], [4, 6], [3, 3, 3], [4, 8], [2, 6], [2, 8], [2, 10], ): x = set(num_list) print("X: ", x) return True return False # num_list 满足 2, 3,5,7,9的倍数,元素全部满足就可以
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,877
klgentle/lc_python
refs/heads/master
/stock_pandas/textmine.py
import pandas as pd import re from nltk.tokenize import sent_tokenize, word_tokenize from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer lemma = WordNetLemmatizer() # lda model import gensim from gensim import corpora print("reading file...") rawdata = pd.read_csv('disney.csv',header=None, encoding='utf8') # no header provided # def removePrefix(_x): # extract the string # x = re.sub(r"[\\]+", "", _x) # return re.match('^b[\'\"](.+)[\'\"]$', str(x)).group(1) def removePrefix(_x): # extract the string try: return _x[2:-1] except: print(_x) cols = rawdata.shape[1] rows = rawdata.shape[0] for i in range(0, cols): # remove nan columns # print(rawdata[i].isna().sum()) if (rawdata[i].isna().sum() == rows): rawdata=rawdata.drop(columns=(i)) rawdata = rawdata.applymap(removePrefix) rawdata = rawdata.rename(columns={0:'author', 1:'text'}) rawtext = rawdata['text'].tolist() def cleansymbols(x): cleanx = re.sub('(http(s)?://)?[0-9A-Za-z]+\.[0-9A-Za-z]+[0-9A-Za-z/.-]+', '', x) # remove hyperlink cleanx = re.sub("(\[[0-9]\])|[\"/–,;\\:.©£•!\?\-\+~@#›¥%……&*\(\)\|“>”/\']|(\\n)", " ", cleanx) # remove punctuation return cleanx # sentence slice print("sentence tokenize...") sentences = [] for t in rawtext: sentences.extend(sent_tokenize(t)) # clean data for i,sen in enumerate(sentences): try: sentences[i] = cleansymbols(sen) except: print(sentences[i]) # tokenize print("word tokenize...") doc_tokens = [word_tokenize(sen) for sen in sentences] # remove stop words and short words print("cleaning data...") custom_stopwors = ['the'] doc_tokens_removed = [] for j, token_li in enumerate(doc_tokens): new_token_li = [] for i, token in enumerate(token_li): lt = token.lower() if not(lt in stopwords.words('english') or lt in custom_stopwors or len(lt)<3): lemma_token = lemma.lemmatize(lt) # lemmatize if (len(lemma_token)>=3): new_token_li.append(lemma_token) doc_tokens_removed.append(new_token_li) dictionary = corpora.Dictionary(doc_tokens_removed) # transform 
into dt-matrix dtmat = [dictionary.doc2bow(doc) for doc in doc_tokens_removed] Lda = gensim.models.ldamodel.LdaModel print("modeling...") # multiple sets of params # for i in range(3,8): # print('-------------',"num_topics = ", i,'--------------------') # ldamodel = Lda(dtmat, num_topics=i, id2word = dictionary, passes=50) # for j in range(3, 7): # print("num_words = ", j) # print(ldamodel.show_topics(num_words=j) ) # print('---') # single output lda_passes = 50 lda_num_topics = 4 ldamodel = Lda(dtmat, num_topics=lda_num_topics, id2word = dictionary, passes=lda_passes) print(ldamodel.show_topics())
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,878
klgentle/lc_python
refs/heads/master
/data_structure/maze.py
def passable(maze, pos): return maze(pos[0], pos[1]) == 0 def mark(maze, pos): maze(pos[0], pos[1]) = 2 def maze_resolve(maze, start, end): dirs = ([1, 0], [0, 1], [-1, 0], [0, -1]) if start == end: return st = SStack() mark(maze, start) st.push((start, 0)) while not st.is_empty(): pos, nxt = st.pop() # 回退 for i in range(nxt, 4) nextp = (pos[0]+dir[i][0], pos[1]+dir[i][1]) if nextp == end: print_path(end, pos, start) # TBC return if passable(maze, nextp): st.push((pos, i+1)) mark(maze, pos) st.push(()nextp, 0) break print("No path find!")
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,879
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/findContentChildren_ans.py
class Solution { //贪心的思想是,用尽量小的饼干去满足小需求的孩子,所以需要进行排序先 public int findContentChildren(int[] g, int[] s) { int child = 0; int cookie = 0; Arrays.sort(g); //先将饼干 和 孩子所需大小都进行排序 Arrays.sort(s); while (child < g.length && cookie < s.length ){ //当其中一个遍历就结束 if (g[child] <= s[cookie]){ //当用当前饼干可以满足当前孩子的需求,可以满足的孩子数量+1 child++; } cookie++; // 饼干只可以用一次,因为饼干如果小的话,就是无法满足被抛弃,满足的话就是被用了 } return child; } }
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,880
klgentle/lc_python
refs/heads/master
/leet_code/medium_case/0016_3Sum_Closest.py
""" Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution. Example: Given array nums = [-1, 2, 1, -4], and target = 1. The sum that is closest to the target is 2. (-1 + 2 + 1 = 2). """ from pysnooper import snoop class Solution: @snoop() def threeSumClosest(self, nums: list, target: int) -> int: total_list = [] sum_closest = 0 nums = sorted(nums) for i in range(len(nums)): if i > 0 and nums[i] == nums[i-1]: continue l, r = i+1, len(nums) - 1 while l < r: total = nums[i] + nums[l] + nums[r] total_list.append(total) if total < target: l += 1 elif total > target: r -= 1 else: return target print(f"total_list:{total_list}") distance = 90120908 for total in total_list: if abs(total - target) <= distance: distance = abs(total - target) sum_closest = total return sum_closest if __name__ == "__main__": lst = [6,-18,-20,-7,-15,9,18,10,1,-20,-17,-19,-3,-5,-19,10,6,-11,1,-17,-15,6,17,-18,-3,16,19,-20,-3,-17,-15,-3,12,1,-9,4,1,12,-2,14,4,-4,19,-20,6,0,-19,18,14,1,-15,-5,14,12,-4,0,-10,6,6,-6,20,-8,-6,5,0,3,10,7,-2,17,20,12,19,-13,-1,10,-1,14,0,7,-3,10,14,14,11,0,-4,-15,-8,3,2,-5,9,10,16,-4,-3,-9,-8,-14,10,6,2,-12,-7,-16,-6,10] target = -52 a = Solution() a.threeSumClosest(lst, target) #print(sorted(lst))
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,881
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/array/p0026_Remove_Duplicates_from_Sorted_Array.py
""" 26. Remove Duplicates from Sorted Array Easy Given a sorted array nums, remove the duplicates in-place such that each element appears only once and returns the new length. Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory. Clarification: Confused why the returned value is an integer but your answer is an array? Note that the input array is passed in by reference, which means a modification to the input array will be known to the caller as well. """ class Solution: def removeDuplicates(self, nums: List[int]) -> int: slow, fast = 0, 0 while fast < len(nums): while nums[fast] != nums[slow]: slow += 1 #维护 nums[0..slow] 无重复 nums[slow] = nums[fast] fast += 1 return slow + 1
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,882
klgentle/lc_python
refs/heads/master
/svn_operate/date_add.py
import datetime


def date_add(day: int) -> str:
    """Return today's date shifted by *day* days, formatted as ``YYYYMMDD``."""
    target = datetime.date.today() + datetime.timedelta(days=day)
    return f"{target:%Y%m%d}"
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,883
klgentle/lc_python
refs/heads/master
/base_practice/annuity_test.py
# Annuity payout simulation: starting from an initial investment that earns
# monthly compound interest, count how many months a fixed monthly payout can
# be sustained before the balance drops below one payout.
# NOTE(review): statement nesting was reconstructed from a flattened source;
# confirm the final print sits after the while loop, inside the else branch.
month=0
investment=10000 #int(input("Please enter your initial investment: "))
rate=3.6 #float(input("Please enter annual interest rate(ex. 2.5): "))
payout=600 #int(input("Please enter the monthly annuity payout: "))
# Convert the percentage (e.g. 3.6) into a fraction (0.036).
rate = rate / 100
balance=investment
#balance=investment-payout/rate*(1-1/(1+rate/12)**month)
if payout>=investment:
    # A single payout already exceeds the investment: report 0 months.
    print("After", month,"months","your balance is {0:.2f}.".format(balance))
else:
    # Each month: accrue one month of interest, then subtract the payout.
    # Stop once the balance can no longer cover a full payout.
    while balance >=payout:
        #balance -= payout/((1+rate/12)**month)
        balance = balance * (1+ rate/12) - payout
        print(f"balance:{balance}")
        month+=1
        print(f"month:{month}")
    print("After", month,"months","your balance is {0:.2f}.".format(balance))
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,884
klgentle/lc_python
refs/heads/master
/scrapy_test/imas_report_download.py
import requests  # HTTP client for the form POST below
import json


def get_report_date(word=None):
    """POST the fixed report-request form to IMAS SmartReport and print the reply.

    The *word* parameter is currently unused; it is kept so existing callers
    keep working.
    """
    url = 'http://10.95.64.198:9086/SmartReport/RegularReport'
    # Form fields identifying report GRKHXX for branch 000000 on 2021-03-31.
    payload = {
        'ajax': 1,
        's_id': None,
        'type': 'GRKHXX.xls',
        'freq': 'ymd',
        'branchCap': '000000 - 创兴银行有限公司(管理行汇总)',
        'branch': '000000',
        'day': '20210331',
        'dayCap': '2021年3月31日',
        'rmlpar': 'ymd|000000 - 创兴银行有限公司(管理行汇总)|20210331',
        'paraDef': 'str|str|str',
        'reportid': 'GRKHXX',
        'S_ID': 'GRKHXX',
        'op': 'view',
        'page': '0',
        'section': '0'
    }
    # Submit the form, then decode the JSON body of the response.
    response = requests.post(url, data=payload)
    content = json.loads(response.text)
    print(content)
    #print(content['translateResult'][0][0]['tgt'])


if __name__ == '__main__':
    get_report_date('')
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,885
klgentle/lc_python
refs/heads/master
/leet_code/top_interview_questions/Medium_Case/P0048_Rotate_Image.py
import copy  # kept: may be used elsewhere in the file

from typing import List  # fix: List was referenced below but never imported


class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """Rotate the square ``matrix`` 90 degrees clockwise, in place.

        Do not return anything; ``matrix`` is modified directly.

        The previous version deep-copied the whole matrix (O(n^2) extra
        space), defeating the problem's explicit O(1)-memory requirement.
        A clockwise rotation is equivalent to transposing and then reversing
        each row, which needs only element swaps.
        """
        n = len(matrix)
        # Transpose in place: swap each element above the diagonal with its
        # mirror below it.
        for r in range(n):
            for c in range(r + 1, n):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
        # Reverse every row: transpose + horizontal flip == 90° clockwise.
        for row in matrix:
            row.reverse()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,886
klgentle/lc_python
refs/heads/master
/leet_code/easy_case/p1025_Divisor_Game.py
class Solution:
    def divisorGame(self, N: int) -> bool:
        """Return True iff Alice (who moves first) wins the divisor game.

        Provable by induction: Alice wins exactly when N is even.
        N == 2: Alice wins immediately. An odd N only has odd divisors, so the
        player handed an odd number must leave an even one; therefore whoever
        holds an even number can always hand the opponent an odd number (take
        1) and inherits the win. Hence the parity of N decides the game.
        """
        return (N & 1) == 0
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,887
klgentle/lc_python
refs/heads/master
/base_practice/add_and_square.py
import time


def add_and_square(number: int):
    """Print every pair (a, b) in [0, number) whose third leg c = number-a-b
    satisfies a^2 + b^2 = c^2 (Pythagorean triples summing to ``number``)."""
    for a in range(number):
        for b in range(number):
            c = number - a - b
            if a * a + b * b == c * c:
                print(f"a : {a}, b: {b}, c:{c}")


if __name__ == "__main__":
    # Time the brute-force search for triples summing to 1000.
    start_time = time.time()
    add_and_square(1000)
    print("time:", time.time() - start_time)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,888
klgentle/lc_python
refs/heads/master
/svn_operate/copy_svn_modify.py
import shutil, os
import openpyxl
import pprint
#import codecs
import csv
import os
import time
import platform
# import pysvn
import sys
from sys import argv
from openpyxl import Workbook
from backupToZip import backupToZip
from datetime import datetime
# from send_mail_with_attach import mail
from config_default import configs
from create_date import getBetweenDay
from SvnOperate import SvnOperate
from PlatformOperation import PlatformOperation


class CopyRegister(object):
    """
    copy svn/1300_编码 upload register and upload file
    "usage python[3] copy_upload_ubuntu.py '20190501'"
    """

    # NOTE(review): this class was reconstructed from a whitespace-flattened
    # source; statement nesting marked below where it was ambiguous.

    def __init__(self, date_str: str, place: str):
        """Prepare paths, update the SVN working copy and reset the beta folder.

        date_str: either a single 'YYYYMMDD' or 'YYYYMMDD,YYYYMMDD' range.
        place:    target environment selector, e.g. 'UAT' or 'PRD'.
        """
        self.__date_str_list = [0]
        self.create_date_str_list(date_str)
        print(f"self.__date_str_list:{self.__date_str_list}")
        self.init_path(place)
        # Bring the 1300_编码 working copy up to date before reading registers.
        self.svn = SvnOperate(self.__svnup_dir)
        self.svn.update_windows_svn()
        self.make_or_clean_folder()
        # Accumulators filled while reading the register spreadsheets.
        self.__data_list = []
        self.__procedure_name_list = []
        self.__error_file_type = set()
        print("init complete")

    def init_path(self, place):
        """Derive all working paths (register folder, SVN dir, beta output dir)."""
        home_path = configs.get("path").get("svn_home_path")
        # if self.svn.is_system_windows():
        if platform.uname().system == "Windows":
            home_path = configs.get("path").get("win_svn_home_path")
        # NOTE(review): fallback nesting inferred — /mnt/e looks like a WSL
        # mount; confirm whether this belongs inside the Windows branch.
        if not os.path.exists(home_path):
            home_path = "/mnt/e"
        print(f"home_path:{home_path}")
        self.code_home = os.path.join(home_path, "svn")
        # Pick the UAT or PRD release-register folder.
        from_folder = "发布登记表_UAT"
        if place.upper() == 'PRD':
            from_folder = "发布登记表_PRD"
        self.__register_folder = os.path.join(self.code_home, "1300_编码", from_folder)
        self.__svnup_dir = os.path.join(self.code_home, "1300_编码")
        code_beta_path = os.path.join(home_path, "yx_walk", "beta")
        self.__beta_path = os.path.join(code_beta_path, self.date_str + "beta_" + place)

    def create_date_str_list(self, date_str):
        """Expand 'start,end' into a list of dates; a single date stands alone.

        Also sets self.date_str to the (last) date of the range.
        """
        if date_str.find(",") > -1:
            self.date_str = date_str.split(",")[-1]
            self.__date_str_list = getBetweenDay(date_str.split(",")[0], self.date_str)
        else:
            self.date_str = date_str
            self.__date_str_list[0] = self.date_str
        return self.__date_str_list

    def make_or_clean_folder(self):
        """Recreate the beta output folder from scratch (deletes prior contents)."""
        if os.path.exists(self.__beta_path):
            print(f"rm -rf {self.__beta_path}")
            shutil.rmtree(f"{self.__beta_path}")
        os.makedirs(self.__beta_path, exist_ok=True)

    def readAllRegister(self):
        """Walk the register folder and read every register workbook matching a date."""
        for folderName, subfolders, filenames in os.walk(self.__register_folder):
            for filename in filenames:
                # find right date register excel
                # filename[-13:-5] is datadate of register
                #print(f"filename:(unknown) test !!!!!!!!!")
                if filename[-13:-5] not in self.__date_str_list:
                    continue
                whole_filename = os.path.join(folderName, filename)
                print(f"whole_filename:{whole_filename}")
                self.readOneRegister(whole_filename)
        print(f"data_list count:{len(self.__data_list)}")

    def readOneRegister(self, whole_filename: str):
        """Read one register workbook's rows (columns A..T) into self.__data_list."""
        wb = openpyxl.load_workbook(whole_filename)
        sheet = wb.active
        # Row 1 is the header; start from row 2.
        for row in range(2, sheet.max_row + 1):
            name = sheet["C" + str(row)].value
            # skip blank record
            if not name:
                continue
            # 20 is hard code, if column is max than 20, should change the value
            data_row = [sheet[chr(i + ord("A")) + str(row)].value for i in range(0, 20)]
            path = data_row[4]
            # skip no path record
            if not path:
                print(f"No path, please check register: {data_row}_________________")
                continue
            # Normalize a datetime cell (column H) to 'YYYYMMDD'.
            if isinstance(data_row[7], datetime):
                data_row[7] = data_row[7].strftime("%Y%m%d")
            self.__data_list.append(data_row)
        return self.__data_list

    def get_bo_list(self):
        """Return the sorted names of all BO/RPT (report) entries read so far."""
        bo_name_list = []
        for row in self.__data_list:
            name, file_type, path = row[2:5]
            if file_type.strip().upper() in ("RPT", "BO"):
                bo_name_list.append(name.strip())
        return sorted(bo_name_list)

    @staticmethod
    def filename_normlize(filename):
        """Strip a file extension, if present."""
        if filename.find(".") > 0:
            filename = os.path.splitext(filename)[0]
        return filename

    @staticmethod
    def filetype_normlize(file_type):
        """Normalize a register file-type string to a lowercase extension.

        Empty -> 'sql'; 'BO' -> 'rpt'; PRO/FNC/PRC -> 'sql'; otherwise lowered.
        """
        if not file_type:
            return "sql"
        if file_type.upper() == "BO":
            file_type = "rpt"
        elif file_type.upper() in ("PRO", "FNC", "PRC"):
            file_type = "sql"
        return file_type.lower()

    @staticmethod
    def get_schema_folder(path_list):
        """Pick the schema folder: Crystal Reports paths use their own folder,
        everything else uses the third path component."""
        schema_folder = ""
        if path_list[1] == "1370_水晶报表":
            schema_folder = "1370_水晶报表"
        else:
            schema_folder = path_list[2]
        return schema_folder

    def filepath_normlize(self, filepath):
        """Normalize a register path: fix the folder-name typo, cut everything
        before '1300_编码', and convert separators for the current platform."""
        # fix 1300编码 to 1300_编码
        # print(f"filepath:{filepath}________________")
        if filepath.find("1300编码") > -1:
            filepath = filepath.replace("1300编码", "1300_编码")
        # cut path from 1300
        if filepath.find("1300_编码") > -1:
            filepath = filepath[filepath.find("1300_编码") :]
        filepath = PlatformOperation.change_path_sep(filepath)
        return filepath

    def register_data_normalize(self):
        """Project each register row to [name, type, path] and normalize all three."""
        file_name_path = map(lambda data_row: data_row[2:5], self.__data_list)
        # print(f"data_list:{self.__data_list}_________________")
        file_name_path = map(
            lambda data: [
                self.filename_normlize(data[0].strip()),
                self.filetype_normlize(data[1].strip()),
                self.filepath_normlize(data[2].strip()),
            ],
            file_name_path,
        )
        return list(file_name_path)

    def register_data_deal(self, file_name_path: list):
        """Build [full_filename, target_subfolder, source_path] rows; also record
        procedure names (paths ending in 05Procedures) for the config check."""
        file_name_path_list = [" "] * len(file_name_path)
        for index, data in enumerate(file_name_path):
            name, file_type, path = data
            # Report names are conventionally uppercase.
            if file_type.upper() in ("RPT", "BO"):
                name = name.upper()
            file_folder = file_type
            if path.endswith("05Procedures"):
                self.__procedure_name_list.append(name.upper())
                file_folder = "pro"
            whole_file_name = name + "." + file_type
            file_name_path_list[index] = [whole_file_name, file_folder, path]
        return file_name_path_list

    def copyfiles(self, file_name_path_list):
        """Copy each listed source file into the beta folder tree.

        Falls back to a lowercase filename when the exact name is missing;
        records the extension of files that could not be found at all.
        """
        for whole_file_name, file_folder, path in file_name_path_list:
            schema_folder = self.get_schema_folder(path.split(os.sep))
            source_file = os.path.join(self.code_home, path, whole_file_name)
            source_file2 = os.path.join(self.code_home, path, whole_file_name.lower())
            target_path = os.path.join(self.__beta_path, schema_folder, file_folder)
            # Spaces in target filenames are replaced to keep scripts happy.
            target_path_file = os.path.join(
                target_path, whole_file_name.replace(" ", "_")
            )
            if not os.path.exists(target_path):
                os.makedirs(target_path, exist_ok=True)
            try:
                # copy ignore upper or lower
                shutil.copy(source_file, target_path_file)
            except FileNotFoundError:
                if os.path.exists(source_file2):
                    shutil.copy(source_file2, target_path_file)
                else:
                    self.__error_file_type.add(whole_file_name.split(".")[1])
                    print(f"error! No such file: {source_file} _______________")
        return self.__error_file_type

    def saveRegisterExcel(self):
        "save excel records to one excel"
        # Start from the template workbook, then append every collected row.
        file1 = os.path.join(
            self.__svnup_dir, "发布登记表_UAT", "cif", "ODS程序版本发布登记表(cif)-template.xlsx"
        )
        file_path_name = self.__beta_path + "/登记表" + self.date_str + ".xlsx"
        shutil.copy(file1, file_path_name)
        # wb = Workbook()
        wb = openpyxl.load_workbook(file_path_name)
        sheet = wb.active
        # TODO remove duplicate, unhashable type: 'list'(error of list(set()))
        for row in self.__data_list:
            sheet.append(row)
        wb.save(filename=file_path_name)

    def createZipfile(self):
        """Zip the whole beta folder for delivery."""
        print(f"file path:{self.__beta_path}")
        return backupToZip(self.__beta_path)

    def list_file2(self, path: str, file_name: str, path2: str):
        """Write a SQL*Plus driver script that @@-includes every file in *path*.

        NOTE(review): the loop variable shadows the *file_name* parameter; the
        output file handle is opened before the loop so this works, but it is
        confusing — consider renaming.
        """
        to_file = open(file_name, "w")
        to_file.write(f"set define off\nspool {self.date_str}_{path2}.log\n\n")
        for file_name in os.listdir(path):
            # Skip the driver scripts themselves.
            if file_name in ("pro.sql", "list.sql"):
                continue
            name_without_type = file_name.split(".")[0]
            s = f"prompt\nprompt {name_without_type}\n@@{file_name}\nprompt\nprompt ==============================\nprompt\n"
            to_file.write(s)
        to_file.write("\nspool off\ncommit;\n")
        to_file.close()

    def listSqlFile(self):
        """Create pro.sql/list.sql driver scripts for each schema's subfolders."""
        dc = {"pro": "pro.sql", "sql": "list.sql"}
        shema_list = [
            "CBSUSER",
            "ODSUSER",
            "RPTUSER",
            "CBSUSER_MO",
            "ODSUSER_MO",
            "RPTUSER_MO",
        ]
        for schema in shema_list:
            for short_path, file_name in dc.items():
                path = os.path.join(self.__beta_path, schema, short_path)
                whole_file_name = os.path.join(path, file_name)
                # Only generate a driver when the folder actually exists.
                if os.path.exists(path):
                    self.list_file2(path, whole_file_name, short_path)

    def createConfigCheckSql(self):
        """Write a SQL script checking that each released procedure is configured
        in ods_job_config (excluding *_MID procedures)."""
        file_name = os.path.join(self.__beta_path, "config_check.sql")
        to_file = open(file_name, "w")
        sql = f"""SELECT OBJECT_NAME FROM ALL_OBJECTS
WHERE OWNER = 'RPTUSER'
AND OBJECT_TYPE = 'PROCEDURE'
AND OBJECT_NAME IN ({", ".join(["'" + name + "'" for name in self.__procedure_name_list])})
AND SUBSTR(OBJECT_NAME,-4) != '_MID'
MINUS
select OBJECT_NAME from ods_job_config where object_type = 'SP';
"""
        to_file.write(f"{sql}\n")
        to_file.close()

    def write_bo_list(self, file_name="bo_list.txt"):
        """Print and persist the list of BO reports going live today."""
        bo_name_list = self.get_bo_list()
        print("\n请核对今日BO上线清单:\n" + "\n".join(bo_name_list) + "\n")
        file_name = os.path.join(self.__beta_path, file_name)
        with open(file_name, "w") as to_file:
            to_file.write("请核对今日BO上线清单:\n")
            for b in bo_name_list:
                to_file.write(b + "\n")

    # def send_mail(self, file_path=""):
    #     if not file_path:
    #         file_path = os.path.join(
    #             os.path.basename(self.__beta_path), self.date_str + "beta.zip"
    #         )
    #     mail(self.date_str, file_path)

    def copy_file_from_register(self):
        """main function for call"""
        # Pipeline: read registers -> save merged excel -> normalize rows ->
        # copy files -> generate driver scripts -> BO list -> zip.
        self.readAllRegister()
        print("read complete")
        self.saveRegisterExcel()
        print("save complete")
        register_data = self.register_data_normalize()
        self.__error_file_type = self.copyfiles(self.register_data_deal(register_data))
        self.listSqlFile()
        print("list file complete")
        #self.createConfigCheckSql()
        #print("create config done")
        self.write_bo_list()
        print("write bo done")
        self.createZipfile()
        print("all done")
        # if only rpt not find, send email
        # if not self.__error_file_type or self.__error_file_type == {"rpt"}:
        #     a.send_mail()


if __name__ == "__main__":
    # print("usage python[3] copy_upload_ubuntu.py '20190501', UAT")
    # Default to today's date; an 8-char first argument overrides it.
    date_str = time.strftime("%Y%m%d", time.localtime())
    if len(argv) > 1 and len(argv[1]) == 8:
        # Dates more than 10 days old used to be rejected; now allowed.
        if int(date_str) - int(argv[1]) > 10:
            pass
            #print(f"argv[1] {argv[1]} is too small")
            #sys.exit(1)
        date_str = argv[1]
    elif len(argv) > 1:
        date_str = argv[1]
    place = argv[2]
    a = CopyRegister(date_str, place)
    a.copy_file_from_register()
    print("Done!")
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,889
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/sliding_window/p0076_Minimum_Window_Substring.py
""" 76. Minimum Window Substring Hard Given two strings s and t, return the minimum window in s which will contain all the characters in t. If there is no such window in s that covers all characters in t, return the empty string "". Note that If there is such a window, it is guaranteed that there will always be only one unique minimum window in s. Example 1: Input: s = "ADOBECODEBANC", t = "ABC" Output: "BANC" Example 2: Input: s = "a", t = "a" Output: "a" """ class Solution: def minWindow(self, s: str, t: str) -> str: need = {} window = {} # t会不会有重复的字符串? 可能会 for c in t: if c in need: need[c] += 1 else: need[c] = 1 left, right = 0, 0 # valid 表示满足条件的字符个数 valid = 0 # 记录结果 start = 0 length = float("inf") while right < len(s): c = s[right] # 右移 right += 1 if c in need: if c in window: window[c] += 1 else: window[c] = 1 # 字符相同且数量一样就加一 if need[c] == window[c]: valid += 1 # 保证字符数的情况下左移窗口 while valid == len(need.keys()): # print(f"window:{window}") if right - left < length: start = left length = right - left d = s[left] # 移动窗口左边 left += 1 if d in need: if need[d] == window[d]: valid -= 1 window[d] -= 1 return s[start : start + length] if length != float("inf") else ""
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,890
klgentle/lc_python
refs/heads/master
/automatic_office/test_docx.py
import unittest
import docx


class TestDocx(unittest.TestCase):
    """Exploratory test: dump style, paragraph and table info from a .docx
    check-in form template (no assertions; output is inspected manually)."""

    def test_para_run(self):
        # Path is relative to the repo root; the file is a Chinese-named
        # monthly check-in form template.
        doc = docx.Document('automatic_office/文思员工-8月签到表_新模版_带加班.docx')
        # print(len(doc.paragraphs))
        print(f"styles:{doc.styles}")
        for style in doc.styles:
            print(f"doc style name:{style.name}")
        # Dump every paragraph with its index, text and style name.
        for ind, para in enumerate(doc.paragraphs):
            print(f"{ind}: {para.text}, style name:{para.style.name}")
        for table in doc.tables:
            print(f"table style name0:{table.style.name}")
            # Print the second column's content, including the header.
            #for cell in table.column_cells(1):
            #    #print(cell.text)
            #    print(f"table style name:{cell.style.name}")


if __name__ == "__main__":
    unittest.main()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,891
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/union_find/p0990_Satisfiability_of_Equality_Equations.py
""" 990. Satisfiability of Equality Equations Medium Given an array equations of strings that represent relationships between variables, each string equations[i] has length 4 and takes one of two different forms: "a==b" or "a!=b". Here, a and b are lowercase letters (not necessarily different) that represent one-letter variable names. Return true if and only if it is possible to assign integers to variable names so as to satisfy all the given equations. Example 1: Input: ["a==b","b!=a"] Output: false Explanation: If we assign say, a = 1 and b = 1, then the first equation is satisfied, but not the second. There is no way to assign the variables to satisfy both equations. Example 2: Input: ["b==a","a==b"] Output: true Explanation: We could assign a = 1 and b = 1 to satisfy both equations. Example 3: Input: ["a==b","b==c","a==c"] Output: true Example 4: Input: ["a==b","b!=c","c==a"] Output: false Example 5: Input: ["c==c","b==d","x!=z"] Output: true """ import string class Solution: def equationsPossible(self, equations: List[str]) -> bool: def find(x): if x != uf[x]: uf[x] = find(uf[x]) return uf[x] uf = {x:x for x in string.ascii_lowercase} for a, c, _, b in equations: if c == "=": uf[find(a)] = find(b) return not any(c == "!" and find(a) == find(b) for a, c, _, b in equations)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,892
klgentle/lc_python
refs/heads/master
/send_sms/send_sms.py
from twilio.rest import Client
from sys import argv

# SECURITY NOTE(review): the account SID, auth token and sender number below
# are hard-coded and committed to source control. They should be rotated and
# loaded from environment variables or a config file instead.
# Your Account SID from twilio.com/console
account_sid = "ACeef1b02b359fad7bf493f482dd52a669"
# Your Auth Token from twilio.com/console
auth_token = "d1e6e6ff2d46d7046163b0931e5bb85a"
from_phone = "+18070265802"
# Module-level Twilio client, created once at import time.
client = Client(account_sid, auth_token)


def send_sms(to_phone, body):
    """Send an SMS *body* to *to_phone* via Twilio; return the message SID."""
    message = client.messages.create(
        to=to_phone,
        from_= from_phone,
        body=body)
    return message.sid


if __name__ == "__main__":
    # Usage: python send_sms.py <to_phone> <message body>
    to_phone = argv[1]
    body = argv[2]
    #print(f"to_phone:{to_phone}, body:{body}")
    send_sms(to_phone, body)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,893
klgentle/lc_python
refs/heads/master
/stock_pandas/plt_bar2.py
import matplotlib.pyplot as plt
import pandas as pd

# Bar chart of how often each credit score occurs in loan.csv.
# x is the x axis, y is the y axis.
loan = pd.read_csv("loan.csv")
x = loan["Credit_score"]
# Frequency of each distinct credit score (groupby sorts by score).
y = loan["Credit_score"].groupby(loan["Credit_score"]).count()
#print(f"x:{x}")
#print(f"y:{y}")
#x = [625, 650, 675, 700, 725, 750, 775, 800]
#y = [2.0, 2.0, 1.0, 3.0, 4.0, 2.0, 1.0, 2.0]
# One bar per distinct score. NOTE(review): drop_duplicates keeps first-seen
# order while groupby counts are sorted by score — these only line up if the
# CSV is already sorted by Credit_score; verify against the data.
p1 = plt.bar(x.drop_duplicates(), height=y, width=10)
plt.title("Credit_score")
plt.grid(axis='y')
plt.grid(axis='x')
plt.show()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,894
klgentle/lc_python
refs/heads/master
/tests/test_proc_log_modify.py
import unittest import os import sys from os.path import dirname # 项目根目录 BASE_DIR = dirname(dirname(os.path.abspath(__file__))) # 添加系统环境变量 sys.path.append(BASE_DIR) from database.ProcedureLogModify import ProcedureLogModify class TestProcLogMod(unittest.TestCase): def test_reset_job_step_value(self): obj = ProcedureLogModify('p_rpt_cif032') print(obj.reset_job_step_value()) #self.assertEqual(obj.add_report_log(proc_cont),except_cont) def test_add_report_log(self): self.maxDiff = None proc_cont ="""V_JOB_ID,'开始处理...'); COMMIT; --重跑---- DELETE FROM RPTUSER.RPT_CIF032_D T WHERE T.DATA_DATE = V_DEAL_DATE; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_1( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA ,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_DEAL_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE IS NOT NULL AND T1.DATA_AREA='FH00'; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_2( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA ,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_LASTWORK_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE IS NOT NULL 
AND T1.DATA_AREA='FH00'; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_3( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA ,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_LASTWORK_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE IS NOT NULL AND T1.DATA_AREA='FH00'; COMMIT; V_END_TIME:=CURRENT_TIMESTAMP ; --处理结束时间 V_SPEND_TIME:=ROUND(TO_NUMBER(TO_DATE(TO_CHAR(V_END_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS') -TO_DATE(TO_CHAR(V_ST_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS')) * 24 * 60 * 60);----执行时间 V_JOB_STEP:=1; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG, """ except_cont ="""V_JOB_ID,'开始处理...'); COMMIT; --重跑---- DELETE FROM RPTUSER.RPT_CIF032_D T WHERE T.DATA_DATE = V_DEAL_DATE; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_1( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA ,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_DEAL_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE 
IS NOT NULL AND T1.DATA_AREA='FH00'; COMMIT; V_END_TIME:=CURRENT_TIMESTAMP ; --处理结束时间 V_SPEND_TIME:=ROUND(TO_NUMBER(TO_DATE(TO_CHAR(V_END_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS') -TO_DATE(TO_CHAR(V_ST_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS')) * 24 * 60 * 60);----执行时间 V_JOB_STEP:=1; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG,V_JOB_ID,V_JOB_STEP||'结束...'); COMMIT; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_2( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA ,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_LASTWORK_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE IS NOT NULL AND T1.DATA_AREA='FH00'; COMMIT; V_END_TIME:=CURRENT_TIMESTAMP ; --处理结束时间 V_SPEND_TIME:=ROUND(TO_NUMBER(TO_DATE(TO_CHAR(V_END_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS') -TO_DATE(TO_CHAR(V_ST_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS')) * 24 * 60 * 60);----执行时间 V_JOB_STEP:=1; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG,V_JOB_ID,V_JOB_STEP||'结束...'); COMMIT; INSERT INTO RPTUSER.TEMP_RPT_CIF032_D_3( CUST_ID ,DATA_DATE ,DATA_AREA ,L_EMP_NAME ,L_LAST_KYC_RD ,L_A_NEXT_KYC_RD ,L_M_NEXT_KYC_RD ,L_HRISK_CODE ,L_RISK_CLASS ,L_THESH_TIER ,L_THESH_CR ) SELECT T1.ID ,T1.DATA_DATE ,T1.DATA_AREA 
,T1.M_L_EMP_NAME ,T1.L_LAST_KYC_RD ,T1.L_A_NEXT_KYC_RD ,T1.L_M_NEXT_KYC_RD ,T1.M_L_HRISK_CODE ,T2.DESCRIPTION -- DONGJIAN 20190827 ,T1.L_THESH_TIER ,T1.L_THESH_CR FROM RPTUSER.RPT_CUSTOMER_MID T1 LEFT JOIN ODSUSER.CBS_F_EB_LOOKUP_M T2 ON T2.ID = 'CUS.AML.RISK.RATE*' || T1.L_AML_RISK_RAT AND T2.DATA_DATE = V_DEAL_DATE AND T2.M = '1' AND T2.S = '1' WHERE T1.DATA_DATE = V_LASTWORK_DATE --AND T1.M_L_EMP_NAME IS NOT NULL --AND T1.M_L_HRISK_CODE IS NOT NULL AND T1.DATA_AREA='FH00'; COMMIT; V_END_TIME:=CURRENT_TIMESTAMP ; --处理结束时间 V_SPEND_TIME:=ROUND(TO_NUMBER(TO_DATE(TO_CHAR(V_END_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS') -TO_DATE(TO_CHAR(V_ST_TIME,'YYYY-MM-DD HH24:MI:SS') ,'YYYY-MM-DD HH24:MI:SS')) * 24 * 60 * 60);----执行时间 V_JOB_STEP:=1; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG, """ obj = ProcedureLogModify('p_rpt_cif032') self.assertEqual(obj.add_report_log(proc_cont),except_cont) def test_set_job_step_value(self): proc_cont = """ INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,'1',V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG,V_JOB_ID,'结束处理...'); """ except_out = """ V_JOB_STEP:=1; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,V_SPEND_TIME,V_ERR_MSG,V_JOB_ID,'结束处理...'); """ obj = ProcedureLogModify("p_rpt_cif035") out = obj.set_job_step_value_and_modify_insert(proc_cont) #print(f"out:{out}") self.assertEqual(except_out, out) def test_modify_procedure_between_report_log(self): proc_cont = """V_ERR_MSG := '100001输入参数[I_RUN_DATE]值为空' ; RAISE EX_DEAL ; END IF ; IF LENGTH(I_RUN_DATE) <> 8 THEN V_ERR_MSG := '100002输入参数[I_RUN_DATE]格式错误(YYYYMMDD),输入格式为:' || I_RUN_DATE ; RAISE EX_DEAL ; END IF ; INSERT 
INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,'0',V_JOB_NAME,0,V_ERR_MSG,""" except_str = """V_ERR_MSG := '100001输入参数[I_RUN_DATE]值为空' ; RAISE EX_DEAL ; END IF ; IF LENGTH(I_RUN_DATE) <> 8 THEN V_ERR_MSG := '100002输入参数[I_RUN_DATE]格式错误(YYYYMMDD),输入格式为:' || I_RUN_DATE ; RAISE EX_DEAL ; END IF ; V_JOB_STEP:=0; INSERT INTO BAT_REPORT_LOG(DEAL_SERIAL_NO,DEAL_DATE,JOB_STEP,JOB_NAME,SPEND_TIME,REMARK,JOB_ID,JOB_STATE) VALUES(BAT_SERIAL_NO.NEXTVAL, V_DEAL_DATE,V_JOB_STEP,V_JOB_NAME,0,V_ERR_MSG,""" obj = ProcedureLogModify("p_rpt_cif035") out_bool = obj.is_log_exists_and_need_modify(proc_cont) # test pass self.assertEqual(True, out_bool) out1 = obj.set_job_step_value_and_modify_insert(proc_cont) #print(f"out:{out1}") self.assertEqual(except_str, out1) out0 = obj.modify_report_log(proc_cont) #print(f"out:{out0}") self.assertEqual(except_str, out0) out = obj.modify_log_register(proc_cont) #print(f"out:{out}") self.assertEqual(except_str, out) if __name__ == "__main__": unittest.main()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,895
klgentle/lc_python
refs/heads/master
/wechat/VisitRecordDC.py
# py3.7 @dataclass(unsafe_hash=True) class VisitRecordDC(object): first_name: str last_name: str phone_number: str # 跳过“访问时间”字段,不作为任何对比条件 date_visited: str = field(hash=False, compare=False) def find_potential_customers_v4(): return set(VisitRecordDC(**r) for r in users_visited_puket) - set( VisitRecordDC(**r) for r in users_visited_nz ) if __name__ == "__main__": # 去过普吉岛的人员数据 users_visited_puket = [ { "first_name" : "Sirena" , "last_name" : "Gross" , "phone_number" : "650-568-0388" , "date_visited" : "2018-03-14" }, { "first_name" : "James" , "last_name" : "Ashcraft" , "phone_number" : "412-334-4380" , "date_visited" : "2014-09-16" }, ] # 去过新西兰的人员数据 users_visited_nz = [ { "first_name" : "Justin" , "last_name" : "Malcom" , "phone_number" : "267-282-1964" , "date_visited" : "2011-03-13" }, { "first_name" : "Albert" , "last_name" : "Potter" , "phone_number" : "702-249-3714" , "date_visited" : "2013-09-11" } ] obj = VisitRecordDC() print(obj.find_potential_customers_v4())
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,896
klgentle/lc_python
refs/heads/master
/svn_operate/skip_table_structure_change.py
def skip_table_structure_change(commit_list): """ 如果即写了modify文件,又改了表结构,则不将表结构变更写入登记表 """ folder_index = 4 has_modify = False modify_tables = [] for row in commit_list: if row[folder_index].endswith("01Tables\\modify"): has_modify = True elif row[folder_index].endswith("01Tables"): modify_tables.append(row) if has_modify and len(modify_tables) > 0: # delete row in commit_list for row in modify_tables: print("Warning, skip row !!!!!!!!!!!!!!!: \n", row) commit_list.remove(row) return commit_list if __name__ == "__main__": commit_list = [ [ "", "报表", "TMP_CIF151_DE_ADDRESSdd", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\01Tables\\modify", "Dongjian", "Gene", "20210112", "", "", "", ], [ "", "报表", "TMP_CIF151_DE_Sdd", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\01Tables\\modify", "Dongjian", "Gene", "20210112", "", "", "", ], [ "", "报表", "TMP_CIF151_DE_ADDRESS", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\01Tables", "Dongjian", "Gene", "20210112", "", "", "", ], [ "CIF", "报表", "p_rpt_cif151", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\05Procedures", "Dongjian", "Gene", "20210112", "", "", "", ], [ "CIF", "报表", "p_rpt_cif911b", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\05Procedures", "Dongjian", "Gene", "20210112", "", "", "", ], [ "CIF", "报表", "RPT_CIF151_D", "rpt", "1000_编辑区\\1300_编码\\1370_水晶报表\\CIF", "Dongjian", "Gene", "20210112", "", "", "", ], [ "", "报表", "TMP_CIF151_DE_ADDRikkkESS", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\01Tables", "Dongjian", "Gene", "20210112", "", "", "", ], [ "", "报表", "TMP_CIF151_DEddd_ADDRESS", "sql", "1000_编辑区\\1300_编码\\1301_ODSDB\\RPTUSER\\01Tables", "Dongjian", "Gene", "20210112", "", "", "", ], [ "CIF", "报表", "RPT_CIF151_D_CSV", "rpt", "1000_编辑区\\1300_编码\\1370_水晶报表\\CSV格式", "Dongjian", "Gene", "20210112", "", "", "", ], ] print("commit list before delete:") for row in commit_list: print(row) commit_list = skip_table_structure_change(commit_list) print("commit list after delete:") for row in commit_list: 
print(row)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,897
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/array/p0046_Permutations.py
""" 46. Permutations Medium Given an array nums of distinct integers, return all the possible permutations. You can return the answer in any order. Example 1: Input: nums = [1,2,3] Output: [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]] Example 2: Input: nums = [0,1] Output: [[0,1],[1,0]] Example 3: Input: nums = [1] Output: [[1]] """ class Solution: def permute(self, nums: List[int]) -> List[List[int]]: def backtrace(nums, trace=[], result=[]): if not nums: # reason why we are copying here is because at "lists are passed by reference" and since we are maintaining only one path #result.append(trace[::]) result.append(trace.copy()) # 需要复制trace, list.copy() 等价于 [::] for i in range(len(nums)): newNums = nums[:i] + nums[i+1:] trace.append(nums[i]) backtrace(newNums, trace, result) trace.pop() return result return backtrace(nums)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,898
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/tree/p0449_Serialize_and_Deserialize_BST.py
""" Serialization is converting a data structure or object into a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment. Design an algorithm to serialize and deserialize a binary search tree. There is no restriction on how your serialization/deserialization algorithm should work. You need to ensure that a binary search tree can be serialized to a string, and this string can be deserialized to the original tree structure. The encoded string should be as compact as possible. Example 1: Input: root = [2,1,3] Output: [2,1,3] Example 2: Input: root = [] Output: [] """ # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Codec: def serialize(self, root: TreeNode) -> str: """Encodes a tree to a single string. """ if not root: return "#" left = self.serialize(root.left) right = self.serialize(root.right) return f"{root.val},{left},{right}" def deserializeFromList(self, data_list) -> TreeNode: if not data_list: return root_val = data_list.pop(0) if root_val == "#": return root = TreeNode(int(root_val)) root.left = self.deserializeFromList(data_list) root.right = self.deserializeFromList(data_list) return root def deserialize(self, data: str) -> TreeNode: """Decodes your encoded data to tree. """ if data == "#": return data_list = data.split(",") return self.deserializeFromList(data_list) # Your Codec object will be instantiated and called as such: # Your Codec object will be instantiated and called as such: # ser = Codec() # deser = Codec() # tree = ser.serialize(root) # ans = deser.deserialize(tree) # return ans
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,899
klgentle/lc_python
refs/heads/master
/automatic_office/__init__.py
import os import sys # 绝对路径的import sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,900
klgentle/lc_python
refs/heads/master
/data_structure/c9_2_insert_sort.py
def insert_sort(lst): for i in range(1, len(lst)): # 开始时片段[0:1]已排序 x = lst[i] j = i while j > 0 and lst[j - 1].key > x.key: lst[j] = lst[j - 1] # 反序逐个后移元素,确定插入位置 j -= 1 lst[j] = x
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,901
klgentle/lc_python
refs/heads/master
/svn_operate/PlatformOperation.py
import os class PlatformOperation(object): @staticmethod def change_path_sep(path): if path.find("\\") > -1 and os.sep != "\\": path = path.replace("\\", os.sep) elif path.find("/") > -1 and os.sep != "/": path = path.replace("/", os.sep) return path if __name__ == "__main__": obj = PlatformOperation() path = "lc_python\\svn_operate" #print(obj.change_path_sep(path)) assert(obj.change_path_sep(path) == path.replace("\\",'/'))
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,902
klgentle/lc_python
refs/heads/master
/leet_code/medium_case/p1195_Fizz_Buzz_Multithreaded.py
from threading import Semaphore class FizzBuzz: def __init__(self, n: int): self.n = n self.sem = Semaphore() # default is 1 self.sem3 = Semaphore(0) self.sem5 = Semaphore(0) self.sem15 = Semaphore(0) # printFizz() outputs "fizz" def fizz(self, printFizz: 'Callable[[], None]') -> None: i = 3 while i <= self.n: self.sem3.acquire() printFizz() i += 3 if i % 5 == 0: i += 3 self.sem.release() # printBuzz() outputs "buzz" def buzz(self, printBuzz: 'Callable[[], None]') -> None: i = 5 while i <= self.n: self.sem5.acquire() printBuzz() i += 5 if i % 3 == 0: i += 5 self.sem.release() # printFizzBuzz() outputs "fizzbuzz" def fizzbuzz(self, printFizzBuzz: 'Callable[[], None]') -> None: for i in range(15, self.n + 1, 15): self.sem15.acquire() printFizzBuzz() self.sem.release() # printNumber(x) outputs "x", where x is an integer. def number(self, printNumber: 'Callable[[int], None]') -> None: for i in range(1, self.n + 1): self.sem.acquire() if i % 15 == 0: self.sem15.release() elif i % 5 == 0: self.sem5.release() elif i % 3 == 0: self.sem3.release() else: printNumber(i) self.sem.release()
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,903
klgentle/lc_python
refs/heads/master
/leet_code/hard_case/p0037_Sudoku_Solver_3.py
import heapq class Solution: #Does a DFS def solve(self, board, heap): if len(heap)==0: return True # (len(remaining), idy, idx, remaining) procitem = heapq.heappop(heap) if procitem[0] == 0: return False for value in procitem[3]: board[procitem[1]][procitem[2]] = value newheap = [] for size, idy, idx, remaining in heap: matchedrow = procitem[1]==idy matchedcol = procitem[2]==idx matchedbox = (procitem[1]//3*3+procitem[2]//3)==(idy//3*3+idx//3) if (matchedrow or matchedcol or matchedbox) and value in remaining: heapq.heappush(newheap,(size-1,idy, idx, remaining - {value})) else: heapq.heappush(newheap, (size, idy, idx, remaining)) if self.solve(board, newheap): return True return False def solveSudoku(self, board: List[List[str]]) -> None: """ Do not return anything, modify board in-place instead. """ allnums = {'1','2','3','4','5','6','7','8','9'} rows = [set() for i in range(9)] cols = [set() for i in range(9)] boxes = [set() for i in range(9)] rowtodo = {} coltodo = {} todo = set() for idy, row in enumerate(board): for idx, val in enumerate(row): if val == '.': todo.add((idy,idx)) else: rows[idy].add(val) cols[idx].add(val) idbox = idy//3*3+idx//3 boxes[idbox].add(val) heap = [] for (idy, idx) in todo: row = rows[idy] col = cols[idx] box = boxes[idy//3*3+idx//3] remaining = allnums - row - col - box heapq.heappush(heap, (len(remaining), idy, idx, remaining)) self.solve(board, heap)
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}
57,904
klgentle/lc_python
refs/heads/master
/leet_code/labuladong/tree/p0103_Binary_Tree_Zigzag_Level_Order_Traversal.py
""" Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between). For example: Given binary tree [3,9,20,null,null,15,7], 3 / \ 9 20 / \ 15 7 return its zigzag level order traversal as: [ [3], [20,9], [15,7] ] """ from collections import deque class Solution: def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]: q, result = deque(), [] redirect = False if root: q.append(root) while q: level = [] for _ in range(len(q)): node = q.popleft() level.append(node.val) if node.left: q.append(node.left) if node.right: q.append(node.right) # print(f"redirect:{redirect}, level:{level}") # 调整加入顺序 if redirect: level.reverse() result.append(level) # change redirect redirect = not redirect return result
{"/tests/testCreateWeekReport.py": ["/automatic_office/CreateWeekReport.py"], "/tests/re_test.py": ["/database/Procedure.py"], "/tests/test_procedure.py": ["/database/Procedure.py"], "/tests/test_proc_log_modify.py": ["/database/ProcedureLogModify.py"], "/database/batch_replace.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"], "/database/Procedure.py": ["/string_code/StringFunctions.py", "/decorator_test/logging_decorator.py"], "/tests/testDealInterval.py": ["/automatic_office/DealInterval.py"], "/automatic_office/CreateWeekReport.py": ["/automatic_office/CheckInForm.py", "/automatic_office/DealInterval.py"], "/database/AutoViewReplace.py": ["/database/Procedure.py", "/database/FindViewOriginalTable.py"], "/automatic_office/CheckInForm.py": ["/automatic_office/Holiday.py"], "/database/ProcedureLogModify.py": ["/database/Procedure.py"], "/tests/test_string.py": ["/string_code/StringFunctions.py"], "/database/replace_view_and_log.py": ["/database/ProcedureLogModify.py", "/database/AutoViewReplace.py"]}