{"repo": "learningOrchestra/mlToolKits", "pull_number": 108, "instance_id": "learningOrchestra__mlToolKits-108", "issue_numbers": "", "base_commit": "6a83b4bcbdf5096b7ed855bcebab358511d19f2c", "patch": "diff --git a/microservices/data_type_handler_image/data_type_handler.py b/microservices/data_type_handler_image/data_type_handler.py\n--- a/microservices/data_type_handler_image/data_type_handler.py\n+++ b/microservices/data_type_handler_image/data_type_handler.py\n@@ -1,4 +1,7 @@\n from pymongo import MongoClient\n+from datetime import datetime\n+import pytz\n+from concurrent.futures import ThreadPoolExecutor\n \n \n class DataTypeConverter:\n@@ -7,8 +10,10 @@ class DataTypeConverter:\n     STRING_TYPE = \"string\"\n     NUMBER_TYPE = \"number\"\n \n-    def __init__(self, database_connector):\n+    def __init__(self, database_connector, metadata_handler):\n         self.database_connector = database_connector\n+        self.thread_pool = ThreadPoolExecutor()\n+        self.metadata_handler = metadata_handler\n \n     def field_converter(self, filename, field, field_type):\n         query = {}\n@@ -27,9 +32,9 @@ def field_converter(self, filename, field, field_type):\n \n             elif field_type == self.NUMBER_TYPE:\n                 if (\n-                    document[field] == int\n-                    or document[field] == float\n-                    or document[field] is None\n+                        document[field] == int\n+                        or document[field] == float\n+                        or document[field] is None\n                 ):\n                     continue\n                 if document[field] == \"\":\n@@ -42,15 +47,53 @@ def field_converter(self, filename, field, field_type):\n \n             self.database_connector.update_one(filename, values, document)\n \n-    def file_converter(self, filename, fields_dictionary):\n+    def convert_existent_file(self, filename, fields_dictionary):\n+\n+        self.metadata_handler.update_finished_metadata_file(filename, False)\n+\n+        self.thread_pool.submit(self.field_file_converter, filename,\n+                                fields_dictionary)\n \n+    def field_file_converter(self, filename, fields_dictionary):\n         for field in fields_dictionary:\n-            self.field_converter(filename, field, fields_dictionary[field])\n+            self.field_converter(filename, field,\n+                                 fields_dictionary[field])\n+\n+        self.metadata_handler.update_finished_metadata_file(filename, True)\n+\n+\n+class FileMetadataHandler:\n+    def __init__(self, database_connector):\n+        self.database_connector = database_connector\n+\n+    def create_metadata_file(self, filename):\n+        timezone_london = pytz.timezone(\"Etc/Greenwich\")\n+        london_time = datetime.now(timezone_london)\n+\n+        metadata_file = {\n+            \"filename\": filename,\n+            \"time_created\": london_time.strftime(\"%Y-%m-%dT%H:%M:%S-00:00\"),\n+            \"_id\": 0,\n+            \"finished\": False,\n+            \"type\": \"dataType\"\n+        }\n+        self.database_connector.insert_one_in_file(filename, metadata_file)\n+\n+    def update_finished_metadata_file(self, filename, flag):\n+        metadata_new_value = {\n+            \"finished\": flag,\n+        }\n+        metadata_query = {\n+            \"_id\": 0\n+        }\n+        self.database_connector.update_one(filename, metadata_new_value,\n+                                           metadata_query)\n \n \n class 
MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def find(self, filename, query):\n@@ -69,6 +112,10 @@ def find_one(self, filename, query):\n         file_collection = self.database[filename]\n         return file_collection.find_one(query)\n \n+    def insert_one_in_file(self, filename, json_object):\n+        file_collection = self.database[filename]\n+        file_collection.insert_one(json_object)\n+\n \n class DataTypeHandlerRequestValidator:\n     MESSAGE_INVALID_FIELDS = \"invalid_fields\"\n@@ -92,11 +139,13 @@ def fields_validator(self, filename, fields):\n \n         filename_metadata_query = {\"filename\": filename}\n \n-        filename_metadata = self.database.find_one(filename, filename_metadata_query)\n+        filename_metadata = self.database.find_one(filename,\n+                                                   filename_metadata_query)\n \n         for field in fields:\n             if field not in filename_metadata[\"fields\"]:\n                 raise Exception(self.MESSAGE_INVALID_FIELDS)\n \n-            if fields[field] != self.NUMBER_TYPE and fields[field] != self.STRING_TYPE:\n+            if fields[field] != self.NUMBER_TYPE and \\\n+                    fields[field] != self.STRING_TYPE:\n                 raise Exception(self.MESSAGE_INVALID_FIELDS)\ndiff --git a/microservices/data_type_handler_image/server.py b/microservices/data_type_handler_image/server.py\n--- a/microservices/data_type_handler_image/server.py\n+++ b/microservices/data_type_handler_image/server.py\n@@ -3,7 +3,8 @@\n from data_type_handler import (\n     MongoOperations,\n     DataTypeHandlerRequestValidator,\n-    DataTypeConverter)\n+    DataTypeConverter,\n+    FileMetadataHandler)\n \n HTTP_STATUS_CODE_SUCESS = 200\n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n@@ -16,12 +17,12 @@\n MESSAGE_RESULT = \"result\"\n \n FILENAME_NAME = \"filename\"\n-\n+FIELD_TYPES_NAMES = \"types\"\n+PARENT_FILENAME_NAME = \"input_filename\"\n FIRST_ARGUMENT = 0\n \n MESSAGE_INVALID_URL = \"invalid_url\"\n MESSAGE_DUPLICATE_FILE = \"duplicate_file\"\n-MESSAGE_CHANGED_FILE = \"file_changed\"\n MESSAGE_DELETED_FILE = \"deleted_file\"\n \n DATABASE_URL = \"DATABASE_URL\"\n@@ -29,51 +30,49 @@\n DATABASE_NAME = \"DATABASE_NAME\"\n DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n \n-PATCH = \"PATCH\"\n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/dataset/\"\n+MICROSERVICE_URI_GET_PARAMS = \"?query={}&limit=20&skip=0\"\n \n app = Flask(__name__)\n \n \n-def collection_database_url(database_url, database_name, database_filename,\n-                            database_replica_set):\n-    return database_url + '/' + \\\n-        database_name + '.' 
+ \\\n-        database_filename + \"?replicaSet=\" + \\\n-        database_replica_set + \\\n-        \"&authSource=admin\"\n-\n-\n-@app.route('/fieldtypes/<filename>', methods=[PATCH])\n-def change_data_type(filename):\n+@app.route('/fieldTypes', methods=[\"PATCH\"])\n+def change_data_type():\n     database = MongoOperations(\n-        os.environ[DATABASE_URL] + '/?replicaSet=' +\n-        os.environ[DATABASE_REPLICA_SET], os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n         os.environ[DATABASE_NAME])\n \n     request_validator = DataTypeHandlerRequestValidator(database)\n \n     try:\n-        request_validator.filename_validator(filename)\n+        parent_filename = request.json[PARENT_FILENAME_NAME]\n+\n+        request_validator.filename_validator(parent_filename)\n     except Exception as invalid_filename:\n         return jsonify(\n-            {MESSAGE_RESULT:\n-                invalid_filename.args[FIRST_ARGUMENT]}),\\\n-            HTTP_STATUS_CODE_NOT_ACCEPTABLE\n+            {MESSAGE_RESULT: invalid_filename.args[FIRST_ARGUMENT]}), \\\n+               HTTP_STATUS_CODE_NOT_ACCEPTABLE\n \n     try:\n         request_validator.fields_validator(\n-            filename, request.json)\n+            parent_filename, request.json[FIELD_TYPES_NAMES])\n     except Exception as invalid_fields:\n         return jsonify(\n-            {MESSAGE_RESULT: invalid_fields.args[FIRST_ARGUMENT]}),\\\n-            HTTP_STATUS_CODE_NOT_ACCEPTABLE\n-\n-    data_type_converter = DataTypeConverter(database)\n-    data_type_converter.file_converter(\n-        filename, request.json)\n-\n-    return jsonify({MESSAGE_RESULT: MESSAGE_CHANGED_FILE}), \\\n-        HTTP_STATUS_CODE_SUCESS\n+            {MESSAGE_RESULT: invalid_fields.args[FIRST_ARGUMENT]}), \\\n+               HTTP_STATUS_CODE_NOT_ACCEPTABLE\n+\n+    metadata_handler = FileMetadataHandler(database)\n+    data_type_converter = DataTypeConverter(database, metadata_handler)\n+    data_type_converter.convert_existent_file(\n+        parent_filename, request.json[FIELD_TYPES_NAMES])\n+\n+    return jsonify({\n+        MESSAGE_RESULT:\n+            MICROSERVICE_URI_GET +\n+            request.json[PARENT_FILENAME_NAME] +\n+            MICROSERVICE_URI_GET_PARAMS}), HTTP_STATUS_CODE_SUCESS\n \n \n if __name__ == \"__main__\":\ndiff --git a/microservices/database_api_image/database.py b/microservices/database_api_image/database.py\n--- a/microservices/database_api_image/database.py\n+++ b/microservices/database_api_image/database.py\n@@ -1,4 +1,3 @@\n-import os\n from pymongo import MongoClient, errors, ASCENDING\n from bson.json_util import dumps\n import requests\n@@ -25,7 +24,8 @@ def __init__(self, database_object, file_manager_object):\n \n     def add_file(self, url, filename):\n         try:\n-            self.file_manager_object.storage_file(filename, url, self.database_object)\n+            self.file_manager_object.storage_file(filename, url,\n+                                                  self.database_object)\n \n         except requests.exceptions.RequestException:\n             raise Exception(self.MESSAGE_INVALID_URL)\n@@ -41,7 +41,7 @@ def read_file(self, filename, skip, limit, query):\n         limit = int(limit)\n \n         for file in self.database_object.find_in_file(\n-            filename, query_object, skip, limit\n+                filename, query_object, skip, limit\n         ):\n             
result.append(json.loads(dumps(file)))\n \n@@ -50,13 +50,15 @@ def read_file(self, filename, skip, limit, query):\n     def delete_file(self, filename):\n         self.database_object.delete_file(filename)\n \n-    def get_files(self):\n+    def get_files(self, type):\n         result = []\n \n         for file in self.database_object.get_filenames():\n             metadata_file = self.database_object.find_one_in_file(\n-                file, {ROW_ID: METADATA_ROW_ID}\n+                file, {ROW_ID: METADATA_ROW_ID, \"type\": type}\n             )\n+            if metadata_file == None:\n+                continue\n             metadata_file.pop(ROW_ID)\n             result.append(metadata_file)\n \n@@ -67,11 +69,10 @@ class MongoOperations:\n     DATABASE_URL = \"DATABASE_URL\"\n     DATABASE_PORT = \"DATABASE_PORT\"\n \n-    def __init__(self):\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n         self.mongo_client = MongoClient(\n-            os.environ[self.DATABASE_URL], int(os.environ[self.DATABASE_PORT])\n-        )\n-        self.database = self.mongo_client.database\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n+        self.database = self.mongo_client[database_name]\n \n     def connection(self, filename):\n         return self.database[filename]\n@@ -79,7 +80,8 @@ def connection(self, filename):\n     def find_in_file(self, filename, query, skip=0, limit=1):\n         file_collection = self.database[filename]\n         return (\n-            file_collection.find(query).sort(ROW_ID, ASCENDING).skip(skip).limit(limit)\n+            file_collection.find(query).sort(ROW_ID, ASCENDING).skip(\n+                skip).limit(limit)\n         )\n \n     def delete_file(self, filename):\n@@ -109,7 +111,8 @@ class CsvDownloader:\n     file_headers = None\n \n     def __init__(self):\n-        self.thread_pool = ThreadPoolExecutor(max_workers=self.MAX_NUMBER_THREADS)\n+        self.thread_pool = ThreadPoolExecutor(\n+            max_workers=self.MAX_NUMBER_THREADS)\n         self.download_tratament_queue = Queue(maxsize=self.MAX_QUEUE_SIZE)\n         self.tratament_save_queue = Queue(maxsize=self.MAX_QUEUE_SIZE)\n \n@@ -152,7 +155,8 @@ def save_file(self, database_connection, filename):\n             {\"$set\": {self.FINISHED: True, \"fields\": self.file_headers}},\n         )\n \n-    def validate_csv_url(self, url):\n+    @staticmethod\n+    def validate_csv_url(url):\n         with closing(requests.get(url, stream=True)) as r:\n             reader = csv.reader(\n                 codecs.iterdecode(r.iter_lines(), encoding=\"utf-8\"),\n@@ -163,8 +167,8 @@ def validate_csv_url(self, url):\n             first_symbol_html = \"<\"\n             first_symbol_json = \"{\"\n             if (\n-                first_line[0][0] == first_symbol_html\n-                or first_line[0][0] == first_symbol_json\n+                    first_line[0][0] == first_symbol_html\n+                    or first_line[0][0] == first_symbol_json\n             ):\n                 raise requests.exceptions.RequestException\n \n@@ -181,6 +185,7 @@ def storage_file(self, filename, url, database_connection):\n             ROW_ID: METADATA_ROW_ID,\n             self.FINISHED: False,\n             \"fields\": \"processing\",\n+            \"type\": \"dataset\"\n         }\n         database_connection.insert_one_in_file(filename, metadata_file)\n         self.thread_pool.submit(self.download_file, url)\ndiff --git 
a/microservices/database_api_image/server.py b/microservices/database_api_image/server.py\n--- a/microservices/database_api_image/server.py\n+++ b/microservices/database_api_image/server.py\n@@ -1,6 +1,7 @@\n from flask import jsonify, request, Flask\n import os\n from database import CsvDownloader, DatabaseApi, MongoOperations\n+from concurrent.futures import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS = 200\n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n@@ -12,32 +13,40 @@\n \n MESSAGE_RESULT = \"result\"\n \n+DATABASE_URL = \"DATABASE_URL\"\n+DATABASE_PORT = \"DATABASE_PORT\"\n+DATABASE_NAME = \"DATABASE_NAME\"\n+DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n+\n FILENAME = \"filename\"\n+URL_FIELD_NAME = \"url\"\n \n FIRST_ARGUMENT = 0\n \n MESSAGE_INVALID_URL = \"invalid_url\"\n MESSAGE_DUPLICATE_FILE = \"duplicate_file\"\n-MESSAGE_CREATED_FILE = \"file_created\"\n MESSAGE_DELETED_FILE = \"deleted_file\"\n \n-GET = \"GET\"\n-POST = \"POST\"\n-DELETE = \"DELETE\"\n-\n PAGINATE_FILE_LIMIT = 20\n \n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/dataset/\"\n+MICROSERVICE_URI_GET_PARAMS = \"?query={}&limit=10&skip=0\"\n+\n app = Flask(__name__)\n \n \n-@app.route(\"/files\", methods=[POST])\n+@app.route(\"/files\", methods=[\"POST\"])\n def create_file():\n-    file_downloader_and_saver = CsvDownloader()\n-    mongo_operations = MongoOperations()\n-    database = DatabaseApi(mongo_operations, file_downloader_and_saver)\n+    file_downloader = CsvDownloader()\n+    mongo_operations = MongoOperations(\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME])\n+    database = DatabaseApi(mongo_operations, file_downloader)\n \n     try:\n-        database.add_file(request.json[\"url\"], request.json[FILENAME])\n+        database.add_file(request.json[URL_FIELD_NAME], request.json[FILENAME])\n \n     except Exception as error_message:\n \n@@ -54,16 +63,24 @@ def create_file():\n             )\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({\n+            MESSAGE_RESULT:\n+                MICROSERVICE_URI_GET +\n+                request.json[FILENAME] +\n+                MICROSERVICE_URI_GET_PARAMS}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n-@app.route(\"/files/<filename>\", methods=[GET])\n+@app.route(\"/files/<filename>\", methods=[\"GET\"])\n def read_files(filename):\n-    file_downloader_and_saver = CsvDownloader()\n-    mongo_operations = MongoOperations()\n-    database = DatabaseApi(mongo_operations, file_downloader_and_saver)\n+    file_downloader = CsvDownloader()\n+    mongo_operations = MongoOperations(\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME])\n+    database = DatabaseApi(mongo_operations, file_downloader)\n \n     limit = int(request.args.get(\"limit\"))\n     if limit > PAGINATE_FILE_LIMIT:\n@@ -76,25 +93,38 @@ def read_files(filename):\n     return jsonify({MESSAGE_RESULT: file_result}), HTTP_STATUS_CODE_SUCESS\n \n \n-@app.route(\"/files\", methods=[GET])\n+@app.route(\"/files\", methods=[\"GET\"])\n def read_files_descriptor():\n-    file_downloader_and_saver = CsvDownloader()\n-    mongo_operations = MongoOperations()\n-    database = DatabaseApi(mongo_operations, file_downloader_and_saver)\n+    file_downloader = CsvDownloader()\n+    mongo_operations = MongoOperations(\n+        
os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME])\n+    database = DatabaseApi(mongo_operations, file_downloader)\n \n-    return jsonify({MESSAGE_RESULT: database.get_files()}), HTTP_STATUS_CODE_SUCESS\n+    return jsonify(\n+        {MESSAGE_RESULT: database.get_files(\n+            request.args.get(\"type\"))}), HTTP_STATUS_CODE_SUCESS\n \n \n-@app.route(\"/files/<filename>\", methods=[DELETE])\n+@app.route(\"/files/<filename>\", methods=[\"DELETE\"])\n def delete_file(filename):\n-    file_downloader_and_saver = CsvDownloader()\n-    mongo_operations = MongoOperations()\n-    database = DatabaseApi(mongo_operations, file_downloader_and_saver)\n+    file_downloader = CsvDownloader()\n+    mongo_operations = MongoOperations(\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME])\n+    database = DatabaseApi(mongo_operations, file_downloader)\n \n-    database.delete_file(filename)\n+    thread_pool = ThreadPoolExecutor()\n+    thread_pool.submit(database.delete_file, filename)\n \n-    return jsonify({MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n+    return jsonify(\n+        {MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n \n \n if __name__ == \"__main__\":\n-    app.run(host=os.environ[DATABASE_API_HOST], port=int(os.environ[DATABASE_API_PORT]))\n+    app.run(host=os.environ[DATABASE_API_HOST],\n+            port=int(os.environ[DATABASE_API_PORT]))\ndiff --git a/microservices/histogram_image/histogram.py b/microservices/histogram_image/histogram.py\n--- a/microservices/histogram_image/histogram.py\n+++ b/microservices/histogram_image/histogram.py\n@@ -1,4 +1,6 @@\n from pymongo import MongoClient\n+from datetime import datetime\n+import pytz\n \n \n class Histogram:\n@@ -9,11 +11,17 @@ def __init__(self, database_connector):\n         self.database_connector = database_connector\n \n     def create_histogram(self, filename, histogram_filename, fields):\n+        timezone_london = pytz.timezone(\"Etc/Greenwich\")\n+        london_time = datetime.now(timezone_london)\n+\n         metadata_histogram_filename = {\n-            \"filename_parent\": filename,\n+            \"parent_filename\": filename,\n             \"fields\": fields,\n             \"filename\": histogram_filename,\n-            \"_id\": 0,\n+            \"type\": \"histogram\",\n+            self.DOCUMENT_ID_NAME: self.METADATA_DOCUMENT_ID,\n+            \"finished\": False,\n+            \"time_created\": london_time.strftime(\"%Y-%m-%dT%H:%M:%S-00:00\")\n         }\n \n         self.database_connector.insert_one_in_file(\n@@ -25,20 +33,30 @@ def create_histogram(self, filename, histogram_filename, fields):\n         for field in fields:\n             field_accumulator = \"$\" + field\n             print(field_accumulator, flush=True)\n-            pipeline = [{\"$group\": {\"_id\": field_accumulator, \"count\": {\"$sum\": 1}}}]\n+            pipeline = [\n+                {\"$group\": {\"_id\": field_accumulator, \"count\": {\"$sum\": 1}}}]\n \n             field_result = {\n                 field: self.database_connector.aggregate(filename, pipeline),\n-                \"_id\": document_id,\n+                self.DOCUMENT_ID_NAME: document_id,\n             }\n             document_id += 1\n \n-            self.database_connector.insert_one_in_file(histogram_filename, field_result)\n+         
   self.database_connector.insert_one_in_file(histogram_filename,\n+                                                       field_result)\n+\n+        metadata_finished_true_query = {\"finished\": True}\n+        metadata_id_query = {self.DOCUMENT_ID_NAME: self.METADATA_DOCUMENT_ID}\n+\n+        self.database_connector.update_one(filename,\n+                                           metadata_finished_true_query,\n+                                           metadata_id_query)\n \n \n class MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def find(self, filename, query):\n@@ -93,7 +111,8 @@ def fields_validator(self, filename, fields):\n \n         filename_metadata_query = {\"filename\": filename}\n \n-        filename_metadata = self.database.find_one(filename, filename_metadata_query)\n+        filename_metadata = self.database.find_one(filename,\n+                                                   filename_metadata_query)\n \n         for field in fields:\n             if field not in filename_metadata[\"fields\"]:\ndiff --git a/microservices/histogram_image/server.py b/microservices/histogram_image/server.py\n--- a/microservices/histogram_image/server.py\n+++ b/microservices/histogram_image/server.py\n@@ -1,6 +1,7 @@\n from flask import jsonify, Flask, request\n import os\n from histogram import MongoOperations, HistogramRequestValidator, Histogram\n+from concurrent.futures import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n HTTP_STATUS_CODE_NOT_ACCEPTABLE = 406\n@@ -11,8 +12,9 @@\n \n MESSAGE_RESULT = \"result\"\n \n-FIELDS_NAME = \"fields\"\n-HISTOGRAM_FILENAME_NAME = \"histogram_filename\"\n+FIELDS_NAME = \"names\"\n+HISTOGRAM_FILENAME_NAME = \"output_filename\"\n+PARENT_FILENAME_NAME = \"input_filename\"\n \n FIRST_ARGUMENT = 0\n \n@@ -23,21 +25,19 @@\n DATABASE_NAME = \"DATABASE_NAME\"\n DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n \n-POST = \"POST\"\n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/explore/histogram/\"\n+MICROSERVICE_URI_GET_PARAMS = \"?query={}&limit=10&skip=0\"\n \n app = Flask(__name__)\n \n+thread_pool = ThreadPoolExecutor()\n \n-def collection_database_url(database_url, database_replica_set):\n-    return database_url + \"/?replicaSet=\" + database_replica_set\n \n-\n-@app.route(\"/histograms/<parent_filename>\", methods=[POST])\n-def create_histogram(parent_filename):\n+@app.route(\"/histograms\", methods=[\"POST\"])\n+def create_histogram():\n     database = MongoOperations(\n-        collection_database_url(\n-            os.environ[DATABASE_URL], os.environ[DATABASE_REPLICA_SET]\n-        ),\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n         os.environ[DATABASE_PORT],\n         os.environ[DATABASE_NAME],\n     )\n@@ -50,11 +50,13 @@ def create_histogram(parent_filename):\n         )\n     except Exception as invalid_histogram_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_histogram_filename.args[FIRST_ARGUMENT]}),\n+            jsonify({MESSAGE_RESULT: invalid_histogram_filename.args[\n+                FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_CONFLICT,\n         )\n \n     
try:\n+        parent_filename = request.json[PARENT_FILENAME_NAME]\n         request_validator.filename_validator(parent_filename)\n     except Exception as invalid_filename:\n         return (\n@@ -63,25 +65,40 @@ def create_histogram(parent_filename):\n         )\n \n     try:\n-        request_validator.fields_validator(parent_filename, request.json[FIELDS_NAME])\n+        request_validator.fields_validator(parent_filename,\n+                                           request.json[FIELDS_NAME])\n     except Exception as invalid_fields:\n         return (\n             jsonify({MESSAGE_RESULT: invalid_fields.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n-    histogram = Histogram(database)\n-    histogram.create_histogram(\n-        parent_filename,\n-        request.json[HISTOGRAM_FILENAME_NAME],\n-        request.json[FIELDS_NAME],\n-    )\n+    thread_pool.submit(histogram_async_processing,\n+                       database,\n+                       parent_filename,\n+                       request.json[HISTOGRAM_FILENAME_NAME],\n+                       request.json[FIELDS_NAME])\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({\n+            MESSAGE_RESULT:\n+                MICROSERVICE_URI_GET +\n+                request.json[HISTOGRAM_FILENAME_NAME] +\n+                MICROSERVICE_URI_GET_PARAMS}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n+def histogram_async_processing(database, parent_filename, histogram_filename,\n+                               fields_name):\n+    histogram = Histogram(database)\n+    histogram.create_histogram(\n+        parent_filename,\n+        histogram_filename,\n+        fields_name,\n+    )\n+\n+\n if __name__ == \"__main__\":\n-    app.run(host=os.environ[HISTOGRAM_HOST], port=int(os.environ[HISTOGRAM_PORT]))\n+    app.run(host=os.environ[HISTOGRAM_HOST],\n+            port=int(os.environ[HISTOGRAM_PORT]))\ndiff --git a/microservices/model_builder_image/model_builder.py b/microservices/model_builder_image/model_builder.py\n--- a/microservices/model_builder_image/model_builder.py\n+++ b/microservices/model_builder_image/model_builder.py\n@@ -1,9 +1,12 @@\n from pyspark.sql import SparkSession\n import os\n import time\n+import numpy as np\n from pyspark.ml.evaluation import MulticlassClassificationEvaluator\n from pymongo import MongoClient\n from concurrent.futures import ThreadPoolExecutor, wait\n+from datetime import datetime\n+import pytz\n from pyspark.ml.classification import (\n     LogisticRegression,\n     DecisionTreeClassifier,\n@@ -28,34 +31,37 @@ def __init__(self, database_connector):\n \n         self.spark_session = (\n             SparkSession.builder.appName(\"model_builder\")\n-            .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n-            .config(\"spark.driver.host\", os.environ[MODEL_BUILDER_HOST_NAME])\n-            .config(\n+                .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n+                .config(\"spark.driver.host\",\n+                        os.environ[MODEL_BUILDER_HOST_NAME])\n+                .config(\n                 \"spark.jars.packages\",\n                 \"org.mongodb.spark:mongo-spark\" + \"-connector_2.11:2.4.2\",\n             )\n-            .config(\"spark.memory.fraction\", 0.8)\n-            .config(\"spark.executor.memory\", \"1g\")\n-            .config(\"spark.sql.shuffle.partitions\", \"800\")\n-            
.config(\"spark.memory.offHeap.enabled\", \"true\")\n-            .config(\"spark.memory.offHeap.size\", \"1g\")\n-            .config(\"spark.scheduler.mode\", \"FAIR\")\n-            .config(\"spark.scheduler.pool\", \"model_builder\")\n-            .config(\"spark.scheduler.allocation.file\", \"./fairscheduler.xml\")\n-            .master(\n+                .config(\"spark.memory.fraction\", 0.8)\n+                .config(\"spark.executor.memory\", \"1g\")\n+                .config(\"spark.sql.shuffle.partitions\", \"800\")\n+                .config(\"spark.memory.offHeap.enabled\", \"true\")\n+                .config(\"spark.memory.offHeap.size\", \"1g\")\n+                .config(\"spark.scheduler.mode\", \"FAIR\")\n+                .config(\"spark.scheduler.pool\", \"model_builder\")\n+                .config(\"spark.scheduler.allocation.file\",\n+                        \"./fairscheduler.xml\")\n+                .master(\n                 \"spark://\"\n                 + os.environ[SPARKMASTER_HOST]\n                 + \":\"\n                 + str(os.environ[SPARKMASTER_PORT])\n             )\n-            .getOrCreate()\n+                .getOrCreate()\n         )\n \n         self.thread_pool = ThreadPoolExecutor()\n \n     def file_processor(self, database_url):\n         file = (\n-            self.spark_session.read.format(\"mongo\").option(\"uri\", database_url).load()\n+            self.spark_session.read.format(\"mongo\").option(\"uri\",\n+                                                           database_url).load()\n         )\n \n         file_without_metadata = file.filter(\n@@ -70,12 +76,14 @@ def file_processor(self, database_url):\n             \"time_created\",\n             \"url\",\n             \"parent_filename\",\n+            \"type\"\n         ]\n         processed_file = file_without_metadata.drop(*metadata_fields)\n \n         return processed_file\n \n-    def fields_from_dataframe(self, dataframe, is_string):\n+    @staticmethod\n+    def fields_from_dataframe(dataframe, is_string):\n         text_fields = []\n         first_row = dataframe.first()\n \n@@ -91,12 +99,13 @@ def fields_from_dataframe(self, dataframe, is_string):\n         return text_fields\n \n     def build_model(\n-        self,\n-        database_url_training,\n-        database_url_test,\n-        preprocessor_code,\n-        classificators_list,\n-        prediction_filename,\n+            self,\n+            database_url_training,\n+            database_url_test,\n+            preprocessor_code,\n+            classificators_list,\n+            train_filename,\n+            test_filename,\n     ):\n         training_df = self.file_processor(database_url_training)\n         testing_df = self.file_processor(database_url_test)\n@@ -118,40 +127,51 @@ def build_model(\n \n         classificator_threads = []\n \n+        timezone_london = pytz.timezone(\"Etc/Greenwich\")\n+        london_time = datetime.now(timezone_london)\n+        now_time = london_time.strftime(\"%Y-%m-%dT%H:%M:%S-00:00\")\n+\n+        metadata_document = {\n+            \"parent_filename\": [train_filename, test_filename],\n+            \"time_created\": now_time,\n+            \"_id\": 0,\n+            \"type\": \"builder\",\n+            \"finished\": False\n+        }\n+\n         for classificator_name in classificators_list:\n             classificator = classificator_switcher[classificator_name]\n \n+            metadata_classifier = metadata_document.copy()\n+            metadata_classifier[\"classifier\"] = 
classificator_name\n+            metadata_classifier[\n+                \"filename\"] = test_filename + \"_\" + classificator_name\n+\n+            self.database.insert_one_in_file(\n+                metadata_classifier[\"filename\"],\n+                metadata_classifier)\n+\n             classificator_threads.append(\n                 self.thread_pool.submit(\n                     self.classificator_handler,\n                     classificator,\n-                    classificator_name,\n                     features_training,\n                     features_testing,\n                     features_evaluation,\n-                    prediction_filename,\n+                    metadata_classifier,\n                 )\n             )\n         wait(classificator_threads)\n         self.spark_session.stop()\n \n     def classificator_handler(\n-        self,\n-        classificator,\n-        classificator_name,\n-        features_training,\n-        features_testing,\n-        features_evaluation,\n-        prediction_filename,\n+            self,\n+            classificator,\n+            features_training,\n+            features_testing,\n+            features_evaluation,\n+            metadata_document\n     ):\n-        prediction_filename_name = (\n-            prediction_filename + \"_prediction_\" + classificator_name\n-        )\n-        metadata_document = {\n-            \"filename\": prediction_filename_name,\n-            \"classificator\": classificator_name,\n-            \"_id\": 0,\n-        }\n \n         classificator.featuresCol = \"features\"\n \n@@ -163,7 +183,6 @@ def classificator_handler(\n         metadata_document[\"fit_time\"] = fit_time\n \n         if features_evaluation is not None:\n-\n             evaluation_prediction = model.transform(features_evaluation)\n \n             evaluator_f1 = MulticlassClassificationEvaluator(\n@@ -171,10 +190,10 @@ def classificator_handler(\n             )\n \n             evaluator_accuracy = MulticlassClassificationEvaluator(\n-                labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\"\n+                labelCol=\"label\", predictionCol=\"prediction\",\n+                metricName=\"accuracy\"\n             )\n \n-            print(classificator_name, flush=True)\n             evaluation_prediction.select(\"label\", \"prediction\").show()\n \n             model_f1 = evaluator_f1.evaluate(evaluation_prediction)\n@@ -186,13 +205,11 @@ def classificator_handler(\n         testing_prediction = model.transform(features_testing)\n \n         self.save_classificator_result(\n-            prediction_filename_name, testing_prediction, metadata_document\n+            testing_prediction,\n+            metadata_document\n         )\n \n-    def save_classificator_result(self, filename_name, predicted_df, filename_metatada):\n-        self.database.delete_file(filename_name)\n-        self.database.insert_one_in_file(filename_name, filename_metatada)\n-\n+    def save_classificator_result(self, predicted_df, filename_metatada):\n         document_id = 1\n         for row in predicted_df.collect():\n             row_dict = row.asDict()\n@@ -204,12 +221,19 @@ def save_classificator_result(self, filename_name, predicted_df, filename_metata\n             del row_dict[\"features\"]\n             del row_dict[\"rawPrediction\"]\n \n-            self.database.insert_one_in_file(filename_name, row_dict)\n+            self.database.insert_one_in_file(filename_metatada[\"filename\"],\n+                                    
         row_dict)\n+\n+        flag_true_query = {\"finished\": True}\n+        metadata_file_query = {\"_id\": 0}\n+        self.database.update_one(filename_metatada[\"filename\"], flag_true_query,\n+                                 metadata_file_query)\n \n \n class MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def get_filenames(self):\n@@ -219,6 +243,11 @@ def find_one(self, filename, query):\n         file_collection = self.database[filename]\n         return file_collection.find_one(query)\n \n+    def update_one(self, filename, new_value, query):\n+        new_values_query = {\"$set\": new_value}\n+        file_collection = self.database[filename]\n+        file_collection.update_one(query, new_values_query)\n+\n     def insert_one_in_file(self, filename, json_object):\n         file_collection = self.database[filename]\n         file_collection.insert_one(json_object)\n@@ -227,11 +256,28 @@ def delete_file(self, filename):\n         file_collection = self.database[filename]\n         file_collection.drop()\n \n+    @staticmethod\n+    def collection_database_url(database_url, database_name,\n+                                database_filename,\n+                                database_replica_set\n+                                ):\n+        return (\n+                database_url\n+                + \"/\"\n+                + database_name\n+                + \".\"\n+                + database_filename\n+                + \"?replicaSet=\"\n+                + database_replica_set\n+                + \"&authSource=admin\"\n+        )\n+\n \n class ModelBuilderRequestValidator:\n     MESSAGE_INVALID_TRAINING_FILENAME = \"invalid_training_filename\"\n     MESSAGE_INVALID_TEST_FILENAME = \"invalid_test_filename\"\n-    MESSAGE_INVALID_CLASSIFICATOR = \"invalid_classificator_name\"\n+    MESSAGE_INVALID_CLASSIFICATOR = \"invalid_classifier_name\"\n+    MESSSAGE_INVALID_PREDICTION_NAME = \"prediction_filename_already_exists\"\n \n     def __init__(self, database_connector):\n         self.database = database_connector\n@@ -248,6 +294,14 @@ def test_filename_validator(self, test_filename):\n         if test_filename not in filenames:\n             raise Exception(self.MESSAGE_INVALID_TEST_FILENAME)\n \n+    def predictions_filename_validator(self, test_filename, classificator_list):\n+        filenames = self.database.get_filenames()\n+\n+        for classificator_name in classificator_list:\n+            prediction_filename = test_filename + \"_\" + classificator_name\n+            if prediction_filename in filenames:\n+                raise Exception(self.MESSSAGE_INVALID_PREDICTION_NAME)\n+\n     def model_classificators_validator(self, classificators_list):\n         classificator_names_list = [\"lr\", \"dt\", \"rf\", \"gb\", \"nb\"]\n         for classificator_name in classificators_list:\ndiff --git a/microservices/model_builder_image/server.py b/microservices/model_builder_image/server.py\n--- a/microservices/model_builder_image/server.py\n+++ b/microservices/model_builder_image/server.py\n@@ -5,54 +5,41 @@\n     MongoOperations,\n     ModelBuilderRequestValidator,\n )\n+from concurrent.futures 
import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n HTTP_STATUS_CODE_NOT_ACCEPTABLE = 406\n+HTTP_STATUS_CODE_CONFICLT = 409\n \n MODEL_BUILDER_HOST_IP = \"MODEL_BUILDER_HOST_IP\"\n MODEL_BUILDER_HOST_PORT = \"MODEL_BUILDER_HOST_PORT\"\n \n-GET = \"GET\"\n-POST = \"POST\"\n-DELETE = \"DELETE\"\n-\n MESSAGE_RESULT = \"result\"\n-MESSAGE_CREATED_FILE = \"created_file\"\n \n DATABASE_URL = \"DATABASE_URL\"\n DATABASE_PORT = \"DATABASE_PORT\"\n DATABASE_NAME = \"DATABASE_NAME\"\n DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n \n-\n-TRAINING_FILENAME = \"training_filename\"\n+TRAINING_FILENAME = \"train_filename\"\n TEST_FILENAME = \"test_filename\"\n-PREPROCESSOR_CODE_NAME = \"preprocessor_code\"\n-CLASSIFICATORS_NAME = \"classificators_list\"\n+MODELING_CODE_NAME = \"modeling_code\"\n+CLASSIFIERS_NAME = \"classifiers_list\"\n FIRST_ARGUMENT = 0\n \n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/builder/\"\n+MICROSERVICE_URI_GET_PARAMS = \"?query={}&limit=10&skip=0\"\n+\n app = Flask(__name__)\n \n-\n-def collection_database_url(\n-    database_url, database_name, database_filename, database_replica_set\n-):\n-    return (\n-        database_url\n-        + \"/\"\n-        + database_name\n-        + \".\"\n-        + database_filename\n-        + \"?replicaSet=\"\n-        + database_replica_set\n-        + \"&authSource=admin\"\n-    )\n+thread_pool = ThreadPoolExecutor()\n \n \n-@app.route(\"/models\", methods=[POST])\n+@app.route(\"/models\", methods=[\"POST\"])\n def create_model():\n     database = MongoOperations(\n-        os.environ[DATABASE_URL] + \"/?replicaSet=\" + os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n         os.environ[DATABASE_PORT],\n         os.environ[DATABASE_NAME],\n     )\n@@ -60,10 +47,12 @@ def create_model():\n     request_validator = ModelBuilderRequestValidator(database)\n \n     try:\n-        request_validator.training_filename_validator(request.json[TRAINING_FILENAME])\n+        request_validator.training_filename_validator(\n+            request.json[TRAINING_FILENAME])\n     except Exception as invalid_training_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_training_filename.args[FIRST_ARGUMENT]}),\n+            jsonify({MESSAGE_RESULT: invalid_training_filename.args[\n+                FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n@@ -71,53 +60,99 @@ def create_model():\n         request_validator.test_filename_validator(request.json[TEST_FILENAME])\n     except Exception as invalid_test_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_test_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_test_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n     try:\n         request_validator.model_classificators_validator(\n-            request.json[CLASSIFICATORS_NAME]\n+            request.json[CLASSIFIERS_NAME]\n         )\n-    except Exception as invalid_classificator_name:\n+    except Exception as invalid_classifier_name:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_classificator_name.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_classifier_name.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n-    database_url_training = collection_database_url(\n+    try:\n+        
request_validator.predictions_filename_validator(\n+            request.json[TEST_FILENAME], request.json[CLASSIFIERS_NAME])\n+    except Exception as invalid_prediction_filename:\n+        return (\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_prediction_filename.args[\n+                    FIRST_ARGUMENT]}),\n+            HTTP_STATUS_CODE_CONFICLT,\n+        )\n+\n+    database_url_training = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n         request.json[TRAINING_FILENAME],\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    database_url_test = collection_database_url(\n+    database_url_test = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n         request.json[TEST_FILENAME],\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    model_builder = SparkModelBuilder(database)\n-\n-    model_builder.build_model(\n+    thread_pool.submit(\n+        model_builder_async_processing,\n+        database,\n         database_url_training,\n         database_url_test,\n-        request.json[PREPROCESSOR_CODE_NAME],\n-        request.json[CLASSIFICATORS_NAME],\n+        request.json[MODELING_CODE_NAME],\n+        request.json[CLASSIFIERS_NAME],\n+        request.json[TRAINING_FILENAME],\n         request.json[TEST_FILENAME],\n     )\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({\n+            MESSAGE_RESULT:\n+                create_prediction_files_uri(\n+                    request.json[CLASSIFIERS_NAME],\n+                    request.json[TEST_FILENAME])}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n+def create_prediction_files_uri(classifiers_list, test_filename):\n+    classifiers_uri = []\n+    for classifier in classifiers_list:\n+        classifiers_uri.append(\n+            MICROSERVICE_URI_GET +\n+            test_filename +\n+            \"_\" +\n+            classifier +\n+            MICROSERVICE_URI_GET_PARAMS)\n+\n+    return classifiers_uri\n+\n+\n+def model_builder_async_processing(database, database_url_training,\n+                                   database_url_test, modeling_code,\n+                                   classifiers_name, train_filename,\n+                                   test_filename):\n+    model_builder = SparkModelBuilder(database)\n+\n+    model_builder.build_model(\n+        database_url_training,\n+        database_url_test,\n+        modeling_code,\n+        classifiers_name,\n+        train_filename,\n+        test_filename,\n+    )\n+\n+\n if __name__ == \"__main__\":\n     app.run(\n         host=os.environ[MODEL_BUILDER_HOST_IP],\n-        port=int(os.environ[MODEL_BUILDER_HOST_PORT]),\n-        debug=True,\n+        port=int(os.environ[MODEL_BUILDER_HOST_PORT])\n     )\ndiff --git a/microservices/pca_image/pca.py b/microservices/pca_image/pca.py\n--- a/microservices/pca_image/pca.py\n+++ b/microservices/pca_image/pca.py\n@@ -23,23 +23,23 @@ class PcaGenerator:\n     def __init__(self, database_url_input):\n         self.spark_session = (\n             SparkSession.builder.appName(\"pca\")\n-            .config(\"spark.mongodb.input.uri\", database_url_input)\n-            .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n-            .config(\"spark.driver.host\", os.environ[PCA_HOST_NAME])\n-            .config(\n+                .config(\"spark.mongodb.input.uri\", database_url_input)\n+             
   .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n+                .config(\"spark.driver.host\", os.environ[PCA_HOST_NAME])\n+                .config(\n                 \"spark.jars.packages\",\n                 \"org.mongodb.spark:mongo-spark\" + \"-connector_2.11:2.4.2\",\n             )\n-            .master(\n+                .master(\n                 \"spark://\"\n                 + os.environ[SPARKMASTER_HOST]\n                 + \":\"\n                 + str(os.environ[SPARKMASTER_PORT])\n             )\n-            .getOrCreate()\n+                .getOrCreate()\n         )\n \n-    def create_image(self, filename, label_name, pca_filename):\n+    def create_image(self, label_name, pca_filename):\n         dataframe = self.file_processor()\n         dataframe = dataframe.dropna()\n         string_fields = self.fields_from_dataframe(dataframe, is_string=True)\n@@ -59,12 +59,15 @@ def create_image(self, filename, label_name, pca_filename):\n \n         if label_name is not None:\n             embedded_array[label_name] = encoded_dataframe[label_name]\n-            sns_plot = sns.scatterplot(x=0, y=1, data=embedded_array, hue=label_name)\n+            sns_plot = sns.scatterplot(x=0, y=1, data=embedded_array,\n+                                       hue=label_name)\n             sns_plot.get_figure().savefig(image_path)\n         else:\n             sns_plot = sns.scatterplot(x=0, y=1, data=embedded_array)\n             sns_plot.get_figure().savefig(image_path)\n \n+        self.spark_session.stop()\n+\n     def file_processor(self):\n         file = self.spark_session.read.format(self.MONGO_SPARK_SOURCE).load()\n \n@@ -80,6 +83,7 @@ def file_processor(self):\n             \"time_created\",\n             \"url\",\n             \"parent_filename\",\n+            \"type\"\n         ]\n         processed_file = file_without_metadata.drop(*metadata_fields)\n \n@@ -103,8 +107,9 @@ def fields_from_dataframe(dataframe, is_string):\n \n \n class MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def find_one(self, filename, query):\n@@ -114,6 +119,21 @@ def find_one(self, filename, query):\n     def get_filenames(self):\n         return self.database.list_collection_names()\n \n+    @staticmethod\n+    def collection_database_url(database_url, database_name, database_filename,\n+                                database_replica_set\n+                                ):\n+        return (\n+                database_url\n+                + \"/\"\n+                + database_name\n+                + \".\"\n+                + database_filename\n+                + \"?replicaSet=\"\n+                + database_replica_set\n+                + \"&authSource=admin\"\n+        )\n+\n \n class PcaRequestValidator:\n     MESSAGE_INVALID_FILENAME = \"invalid_filename\"\n@@ -130,18 +150,20 @@ def parent_filename_validator(self, filename):\n         if filename not in filenames:\n             raise Exception(self.MESSAGE_INVALID_FILENAME)\n \n-    def pca_filename_existence_validator(self, pca_filename):\n+    @staticmethod\n+    def pca_filename_existence_validator(pca_filename):\n         images = 
os.listdir(os.environ[IMAGES_PATH])\n         image_name = pca_filename + IMAGE_FORMAT\n         if image_name in images:\n-            raise Exception(self.MESSAGE_DUPLICATE_FILE)\n+            raise Exception(PcaRequestValidator.MESSAGE_DUPLICATE_FILE)\n \n-    def no_pca_filename_existence_validator(self, pca_filename):\n+    @staticmethod\n+    def pca_filename_inexistence_validator(pca_filename):\n         images = os.listdir(os.environ[IMAGES_PATH])\n         image_name = pca_filename + IMAGE_FORMAT\n \n         if image_name not in images:\n-            raise Exception(self.MESSAGE_NOT_FOUND)\n+            raise Exception(PcaRequestValidator.MESSAGE_NOT_FOUND)\n \n     def filename_label_validator(self, filename, label):\n         if label is None:\n@@ -149,7 +171,8 @@ def filename_label_validator(self, filename, label):\n \n         filename_metadata_query = {\"filename\": filename}\n \n-        filename_metadata = self.database.find_one(filename, filename_metadata_query)\n+        filename_metadata = self.database.find_one(filename,\n+                                                   filename_metadata_query)\n \n         if label not in filename_metadata[\"fields\"]:\n             raise Exception(self.MESSAGE_INVALID_LABEL)\ndiff --git a/microservices/pca_image/server.py b/microservices/pca_image/server.py\n--- a/microservices/pca_image/server.py\n+++ b/microservices/pca_image/server.py\n@@ -1,6 +1,7 @@\n from flask import jsonify, request, Flask, send_file\n import os\n from pca import PcaGenerator, MongoOperations, PcaRequestValidator\n+from concurrent.futures import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS = 200\n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n@@ -18,46 +19,31 @@\n DATABASE_PORT = \"DATABASE_PORT\"\n DATABASE_NAME = \"DATABASE_NAME\"\n DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n-FULL_DATABASE_URL = (\n-    os.environ[DATABASE_URL] + \"/?replicaSet=\" + os.environ[DATABASE_REPLICA_SET]\n-)\n-\n-GET = \"GET\"\n-POST = \"POST\"\n-DELETE = \"DELETE\"\n \n MESSAGE_RESULT = \"result\"\n-PCA_FILENAME_NAME = \"pca_filename\"\n-LABEL_NAME = \"label_name\"\n+PCA_FILENAME_NAME = \"output_filename\"\n+PARENT_FILENAME_NAME = \"input_filename\"\n+LABEL_NAME = \"label\"\n \n-MESSAGE_CREATED_FILE = \"created_file\"\n MESSAGE_DELETED_FILE = \"deleted_file\"\n MESSAGE_NOT_FOUND = \"not_found_file\"\n \n FIRST_ARGUMENT = 0\n \n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/explore/pca/\"\n+\n app = Flask(__name__)\n \n+thread_pool = ThreadPoolExecutor()\n \n-def collection_database_url(\n-    database_url, database_name, database_filename, database_replica_set\n-):\n-    return (\n-        database_url\n-        + \"/\"\n-        + database_name\n-        + \".\"\n-        + database_filename\n-        + \"?replicaSet=\"\n-        + database_replica_set\n-        + \"&authSource=admin\"\n-    )\n \n-\n-@app.route(\"/images/<parent_filename>\", methods=[POST])\n-def create_pca(parent_filename):\n+@app.route(\"/images\", methods=[\"POST\"])\n+def pca_plot():\n     database = MongoOperations(\n-        FULL_DATABASE_URL, os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME]\n     )\n     request_validator = PcaRequestValidator(database)\n \n@@ -67,12 +53,14 @@ def create_pca(parent_filename):\n         )\n     except Exception as invalid_pca_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: 
invalid_pca_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_pca_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_CONFLICT,\n         )\n \n     try:\n-        request_validator.parent_filename_validator(parent_filename)\n+        request_validator.parent_filename_validator(\n+            request.json[PARENT_FILENAME_NAME])\n     except Exception as invalid_filename:\n         return (\n             jsonify({MESSAGE_RESULT: invalid_filename.args[FIRST_ARGUMENT]}),\n@@ -81,7 +69,7 @@ def create_pca(parent_filename):\n \n     try:\n         request_validator.filename_label_validator(\n-            parent_filename, request.json[LABEL_NAME]\n+            request.json[PARENT_FILENAME_NAME], request.json[LABEL_NAME]\n         )\n     except Exception as invalid_label:\n         return (\n@@ -89,43 +77,51 @@ def create_pca(parent_filename):\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n-    database_url_input = collection_database_url(\n+    database_url_input = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n-        parent_filename,\n+        request.json[PARENT_FILENAME_NAME],\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    pca_generator = PcaGenerator(database_url_input)\n-\n-    pca_generator.create_image(\n-        parent_filename, request.json[LABEL_NAME], request.json[PCA_FILENAME_NAME]\n-    )\n+    thread_pool.submit(pca_async_processing,\n+                       database_url_input,\n+                       request.json[LABEL_NAME],\n+                       request.json[PCA_FILENAME_NAME])\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({\n+            MESSAGE_RESULT:\n+                MICROSERVICE_URI_GET +\n+                request.json[PCA_FILENAME_NAME]}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n-@app.route(\"/images\", methods=[GET])\n+def pca_async_processing(database_url_input, label_name,\n+                         pca_filename):\n+    pca_generator = PcaGenerator(database_url_input)\n+\n+    pca_generator.create_image(\n+        label_name, pca_filename\n+    )\n+\n+\n+@app.route(\"/images\", methods=[\"GET\"])\n def get_images():\n     images = os.listdir(os.environ[IMAGES_PATH])\n     return jsonify({MESSAGE_RESULT: images}), HTTP_STATUS_CODE_SUCESS\n \n \n-@app.route(\"/images/<filename>\", methods=[GET])\n+@app.route(\"/images/<filename>\", methods=[\"GET\"])\n def get_image(filename):\n-    database = MongoOperations(\n-        FULL_DATABASE_URL, os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n-    )\n-    request_validator = PcaRequestValidator(database)\n-\n     try:\n-        request_validator.no_pca_filename_existence_validator(filename)\n+        PcaRequestValidator.pca_filename_inexistence_validator(filename)\n+\n     except Exception as invalid_pca_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_pca_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_pca_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_FOUND,\n         )\n \n@@ -134,25 +130,23 @@ def get_image(filename):\n     return send_file(image_path, mimetype=\"image/png\")\n \n \n-@app.route(\"/images/<filename>\", methods=[DELETE])\n+@app.route(\"/images/<filename>\", methods=[\"DELETE\"])\n def delete_image(filename):\n-    database = MongoOperations(\n-        FULL_DATABASE_URL, 
os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n-    )\n-    request_validator = PcaRequestValidator(database)\n-\n     try:\n-        request_validator.no_pca_filename_existence_validator(filename)\n+        PcaRequestValidator.pca_filename_inexistence_validator(filename)\n     except Exception as invalid_pca_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_pca_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_pca_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_FOUND,\n         )\n \n     image_path = os.environ[IMAGES_PATH] + \"/\" + filename + IMAGE_FORMAT\n-    os.remove(image_path)\n \n-    return jsonify({MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n+    thread_pool.submit(os.remove, image_path)\n+\n+    return jsonify(\n+        {MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n \n \n if __name__ == \"__main__\":\ndiff --git a/microservices/projection_image/projection.py b/microservices/projection_image/projection.py\n--- a/microservices/projection_image/projection.py\n+++ b/microservices/projection_image/projection.py\n@@ -16,35 +16,41 @@ class SparkManager:\n     MONGO_SPARK_SOURCE = \"com.mongodb.spark.sql.DefaultSource\"\n     METADATA_FILE_ID = 0\n     database_url_output = None\n+    MAX_NUMBER_THREADS = 3\n \n     def __init__(self, database_url_input, database_url_output):\n         self.database_url_output = database_url_output\n \n         self.spark_session = (\n             SparkSession.builder.appName(\"projection\")\n-            .config(\"spark.mongodb.input.uri\", database_url_input)\n-            .config(\"spark.mongodb.output.uri\", database_url_output)\n-            .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n-            .config(\"spark.driver.host\", os.environ[PROJECTION_HOST_NAME])\n-            .config(\n+                .config(\"spark.mongodb.input.uri\", database_url_input)\n+                .config(\"spark.mongodb.output.uri\", database_url_output)\n+                .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n+                .config(\"spark.driver.host\", os.environ[PROJECTION_HOST_NAME])\n+                .config(\n                 \"spark.jars.packages\",\n                 \"org.mongodb.spark:mongo-spark\" + \"-connector_2.11:2.4.2\",\n             )\n-            .master(\n+                .master(\n                 \"spark://\"\n                 + os.environ[SPARKMASTER_HOST]\n                 + \":\"\n                 + str(os.environ[SPARKMASTER_PORT])\n             )\n-            .getOrCreate()\n+                .getOrCreate()\n         )\n \n     def projection(self, filename, projection_filename, fields):\n         timezone_london = pytz.timezone(\"Etc/Greenwich\")\n         london_time = datetime.now(timezone_london)\n \n+        fields.append(self.DOCUMENT_ID)\n         fields_without_id = fields.copy()\n-        fields_without_id.remove(self.DOCUMENT_ID)\n+\n+        try:\n+            fields_without_id.remove(self.DOCUMENT_ID)\n+        except Exception:\n+            pass\n \n         metadata_content = (\n             projection_filename,\n@@ -53,6 +59,7 @@ def projection(self, filename, projection_filename, fields):\n             filename,\n             self.METADATA_FILE_ID,\n             fields_without_id,\n+            \"projection\"\n         )\n \n         metadata_fields = [\n@@ -62,6 +69,7 @@ def projection(self, filename, projection_filename, fields):\n     
        \"parent_filename\",\n             self.DOCUMENT_ID,\n             \"fields\",\n+            \"type\",\n         ]\n \n         metadata_dataframe = self.spark_session.createDataFrame(\n@@ -70,16 +78,21 @@ def projection(self, filename, projection_filename, fields):\n \n         metadata_dataframe.write.format(self.MONGO_SPARK_SOURCE).save()\n \n-        self.submit_projection_job_spark(fields, metadata_content, metadata_fields)\n+        self.submit_projection_job_spark(fields,\n+                                         metadata_content,\n+                                         metadata_fields)\n \n-    def submit_projection_job_spark(self, fields, metadata_content, metadata_fields):\n-        dataframe = self.spark_session.read.format(self.MONGO_SPARK_SOURCE).load()\n+    def submit_projection_job_spark(self, fields, metadata_content,\n+                                    metadata_fields):\n+        dataframe = self.spark_session.read.format(\n+            self.MONGO_SPARK_SOURCE).load()\n         dataframe = dataframe.filter(\n             dataframe[self.DOCUMENT_ID] != self.METADATA_FILE_ID\n         )\n \n         projection_dataframe = dataframe.select(*fields)\n-        projection_dataframe.write.format(self.MONGO_SPARK_SOURCE).mode(\"append\").save()\n+        projection_dataframe.write.format(self.MONGO_SPARK_SOURCE).mode(\n+            \"append\").save()\n \n         metadata_content_list = list(metadata_content)\n         metadata_content_list[metadata_content_list.index(False)] = True\n@@ -97,8 +110,9 @@ def submit_projection_job_spark(self, fields, metadata_content, metadata_fields)\n \n \n class MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def find_one(self, filename, query):\n@@ -108,6 +122,22 @@ def find_one(self, filename, query):\n     def get_filenames(self):\n         return self.database.list_collection_names()\n \n+    @staticmethod\n+    def collection_database_url(\n+            database_url, database_name, database_filename,\n+            database_replica_set\n+    ):\n+        return (\n+                database_url\n+                + \"/\"\n+                + database_name\n+                + \".\"\n+                + database_filename\n+                + \"?replicaSet=\"\n+                + database_replica_set\n+                + \"&authSource=admin\"\n+        )\n+\n \n class ProjectionRequestValidator:\n     MESSAGE_INVALID_FIELDS = \"invalid_fields\"\n@@ -136,7 +166,8 @@ def projection_fields_validator(self, filename, projection_fields):\n \n         filename_metadata_query = {\"filename\": filename}\n \n-        filename_metadata = self.database.find_one(filename, filename_metadata_query)\n+        filename_metadata = self.database.find_one(filename,\n+                                                   filename_metadata_query)\n \n         for field in projection_fields:\n             if field not in filename_metadata[\"fields\"]:\ndiff --git a/microservices/projection_image/server.py b/microservices/projection_image/server.py\n--- a/microservices/projection_image/server.py\n+++ b/microservices/projection_image/server.py\n@@ -1,6 +1,8 @@\n from flask import 
jsonify, request, Flask\n import os\n-from projection import SparkManager, MongoOperations, ProjectionRequestValidator\n+from projection import SparkManager, MongoOperations, \\\n+    ProjectionRequestValidator\n+from concurrent.futures import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n HTTP_STATUS_CODE_CONFLICT = 409\n@@ -17,40 +19,25 @@\n DOCUMENT_ID = \"_id\"\n METADATA_DOCUMENT_ID = 0\n \n-GET = \"GET\"\n-POST = \"POST\"\n-DELETE = \"DELETE\"\n-\n MESSAGE_RESULT = \"result\"\n-PROJECTION_FILENAME_NAME = \"projection_filename\"\n-FIELDS_NAME = \"fields\"\n+PROJECTION_FILENAME_NAME = \"output_filename\"\n+PARENT_FILENAME_NAME = \"input_filename\"\n+FIELDS_NAME = \"names\"\n \n-MESSAGE_CREATED_FILE = \"created_file\"\n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/transform/projection/\"\n+MICROSERVICE_URI_GET_PARAMS = \"?query={}&limit=20&skip=0\"\n \n FIRST_ARGUMENT = 0\n \n app = Flask(__name__)\n+thread_pool = ThreadPoolExecutor()\n \n \n-def collection_database_url(\n-    database_url, database_name, database_filename, database_replica_set\n-):\n-    return (\n-        database_url\n-        + \"/\"\n-        + database_name\n-        + \".\"\n-        + database_filename\n-        + \"?replicaSet=\"\n-        + database_replica_set\n-        + \"&authSource=admin\"\n-    )\n-\n-\n-@app.route(\"/projections/<parent_filename>\", methods=[POST])\n-def create_projection(parent_filename):\n+@app.route(\"/projections\", methods=[\"POST\"])\n+def create_projection():\n     database = MongoOperations(\n-        os.environ[DATABASE_URL] + \"/?replicaSet=\" + os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n         os.environ[DATABASE_PORT],\n         os.environ[DATABASE_NAME],\n     )\n@@ -63,11 +50,13 @@ def create_projection(parent_filename):\n         )\n     except Exception as invalid_projection_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_projection_filename.args[FIRST_ARGUMENT]}),\n+            jsonify({MESSAGE_RESULT: invalid_projection_filename.args[\n+                FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_CONFLICT,\n         )\n \n     try:\n+        parent_filename = request.json[PARENT_FILENAME_NAME]\n         request_validator.filename_validator(parent_filename)\n     except Exception as invalid_filename:\n         return (\n@@ -85,37 +74,46 @@ def create_projection(parent_filename):\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n-    database_url_input = collection_database_url(\n+    database_url_input = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n         parent_filename,\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    database_url_output = collection_database_url(\n+    database_url_output = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n         request.json[PROJECTION_FILENAME_NAME],\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    spark_manager = SparkManager(database_url_input, database_url_output)\n-\n-    projection_fields = request.json[FIELDS_NAME]\n-\n-    projection_fields.append(DOCUMENT_ID)\n-\n-    spark_manager.projection(\n-        parent_filename, request.json[PROJECTION_FILENAME_NAME], projection_fields\n-    )\n+    thread_pool.submit(projection_async_processing, database_url_input,\n+                       database_url_output, 
request.json[FIELDS_NAME],\n+                       parent_filename, request.json[PROJECTION_FILENAME_NAME])\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({\n+            MESSAGE_RESULT:\n+                MICROSERVICE_URI_GET +\n+                request.json[PROJECTION_FILENAME_NAME] +\n+                MICROSERVICE_URI_GET_PARAMS}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n+def projection_async_processing(database_url_input, database_url_output,\n+                                projection_fields, parent_filename,\n+                                output_filename):\n+    spark_manager = SparkManager(database_url_input, database_url_output)\n+\n+    spark_manager.projection(\n+        parent_filename, output_filename,\n+        projection_fields)\n+\n+\n if __name__ == \"__main__\":\n     app.run(\n-        host=os.environ[PROJECTION_HOST_IP], port=int(os.environ[PROJECTION_HOST_PORT])\n+        host=os.environ[PROJECTION_HOST_IP],\n+        port=int(os.environ[PROJECTION_HOST_PORT])\n     )\ndiff --git a/microservices/tsne_image/server.py b/microservices/tsne_image/server.py\n--- a/microservices/tsne_image/server.py\n+++ b/microservices/tsne_image/server.py\n@@ -1,6 +1,7 @@\n from flask import jsonify, request, Flask, send_file\n import os\n from tsne import TsneGenerator, MongoOperations, TsneRequestValidator\n+from concurrent.futures import ThreadPoolExecutor\n \n HTTP_STATUS_CODE_SUCESS = 200\n HTTP_STATUS_CODE_SUCESS_CREATED = 201\n@@ -18,46 +19,31 @@\n DATABASE_PORT = \"DATABASE_PORT\"\n DATABASE_NAME = \"DATABASE_NAME\"\n DATABASE_REPLICA_SET = \"DATABASE_REPLICA_SET\"\n-FULL_DATABASE_URL = (\n-    os.environ[DATABASE_URL] + \"/?replicaSet=\" + os.environ[DATABASE_REPLICA_SET]\n-)\n-\n-GET = \"GET\"\n-POST = \"POST\"\n-DELETE = \"DELETE\"\n \n MESSAGE_RESULT = \"result\"\n-TSNE_FILENAME_NAME = \"tsne_filename\"\n-LABEL_NAME = \"label_name\"\n+PARENT_FILENAME_NAME = \"input_filename\"\n+TSNE_FILENAME_NAME = \"output_filename\"\n+LABEL_NAME = \"label\"\n \n-MESSAGE_CREATED_FILE = \"created_file\"\n MESSAGE_DELETED_FILE = \"deleted_file\"\n MESSAGE_NOT_FOUND = \"not_found_file\"\n \n FIRST_ARGUMENT = 0\n \n-app = Flask(__name__)\n+MICROSERVICE_URI_GET = \"/api/learningOrchestra/v1/explore/tsne/\"\n \n+thread_pool = ThreadPoolExecutor()\n \n-def collection_database_url(\n-    database_url, database_name, database_filename, database_replica_set\n-):\n-    return (\n-        database_url\n-        + \"/\"\n-        + database_name\n-        + \".\"\n-        + database_filename\n-        + \"?replicaSet=\"\n-        + database_replica_set\n-        + \"&authSource=admin\"\n-    )\n+app = Flask(__name__)\n \n \n-@app.route(\"/images/<parent_filename>\", methods=[POST])\n-def create_tsne(parent_filename):\n+@app.route(\"/images\", methods=[\"POST\"])\n+def create_tsne():\n     database = MongoOperations(\n-        FULL_DATABASE_URL, os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n+        os.environ[DATABASE_URL],\n+        os.environ[DATABASE_REPLICA_SET],\n+        os.environ[DATABASE_PORT],\n+        os.environ[DATABASE_NAME]\n     )\n     request_validator = TsneRequestValidator(database)\n \n@@ -67,12 +53,14 @@ def create_tsne(parent_filename):\n         )\n     except Exception as invalid_tsne_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n      
       HTTP_STATUS_CODE_CONFLICT,\n         )\n \n     try:\n-        request_validator.parent_filename_validator(parent_filename)\n+        request_validator.parent_filename_validator(\n+            request.json[PARENT_FILENAME_NAME])\n     except Exception as invalid_filename:\n         return (\n             jsonify({MESSAGE_RESULT: invalid_filename.args[FIRST_ARGUMENT]}),\n@@ -81,7 +69,7 @@ def create_tsne(parent_filename):\n \n     try:\n         request_validator.filename_label_validator(\n-            parent_filename, request.json[LABEL_NAME]\n+            request.json[PARENT_FILENAME_NAME], request.json[LABEL_NAME]\n         )\n     except Exception as invalid_label:\n         return (\n@@ -89,43 +77,50 @@ def create_tsne(parent_filename):\n             HTTP_STATUS_CODE_NOT_ACCEPTABLE,\n         )\n \n-    database_url_input = collection_database_url(\n+    database_url_input = MongoOperations.collection_database_url(\n         os.environ[DATABASE_URL],\n         os.environ[DATABASE_NAME],\n-        parent_filename,\n+        request.json[PARENT_FILENAME_NAME],\n         os.environ[DATABASE_REPLICA_SET],\n     )\n \n-    tsne_generator = TsneGenerator(database_url_input)\n-\n-    tsne_generator.create_image(\n-        parent_filename, request.json[LABEL_NAME], request.json[TSNE_FILENAME_NAME]\n-    )\n+    thread_pool.submit(tsne_async_processing,\n+                       database_url_input,\n+                       request.json[LABEL_NAME],\n+                       request.json[TSNE_FILENAME_NAME])\n \n     return (\n-        jsonify({MESSAGE_RESULT: MESSAGE_CREATED_FILE}),\n+        jsonify({MESSAGE_RESULT:\n+                     MICROSERVICE_URI_GET +\n+                     request.json[TSNE_FILENAME_NAME]}),\n         HTTP_STATUS_CODE_SUCESS_CREATED,\n     )\n \n \n-@app.route(\"/images\", methods=[GET])\n+def tsne_async_processing(database_url_input, label_name,\n+                          tsne_filename):\n+    tsne_generator = TsneGenerator(database_url_input)\n+\n+    tsne_generator.create_image(\n+        label_name,\n+        tsne_filename\n+    )\n+\n+\n+@app.route(\"/images\", methods=[\"GET\"])\n def get_images():\n     images = os.listdir(os.environ[IMAGES_PATH])\n     return jsonify({MESSAGE_RESULT: images}), HTTP_STATUS_CODE_SUCESS\n \n \n-@app.route(\"/images/<filename>\", methods=[GET])\n+@app.route(\"/images/<filename>\", methods=[\"GET\"])\n def get_image(filename):\n-    database = MongoOperations(\n-        FULL_DATABASE_URL, os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n-    )\n-    request_validator = TsneRequestValidator(database)\n-\n     try:\n-        request_validator.no_tsne_filename_existence_validator(filename)\n+        TsneRequestValidator.tsne_filename_inexistence_validator(filename)\n     except Exception as invalid_tsne_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_FOUND,\n         )\n \n@@ -134,26 +129,25 @@ def get_image(filename):\n     return send_file(image_path, mimetype=\"image/png\")\n \n \n-@app.route(\"/images/<filename>\", methods=[DELETE])\n+@app.route(\"/images/<filename>\", methods=[\"DELETE\"])\n def delete_image(filename):\n-    database = MongoOperations(\n-        FULL_DATABASE_URL, os.environ[DATABASE_PORT], os.environ[DATABASE_NAME]\n-    )\n-    request_validator = TsneRequestValidator(database)\n-\n     try:\n-    
    request_validator.no_tsne_filename_existence_validator(filename)\n+        TsneRequestValidator.tsne_filename_inexistence_validator(filename)\n     except Exception as invalid_tsne_filename:\n         return (\n-            jsonify({MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n+            jsonify(\n+                {MESSAGE_RESULT: invalid_tsne_filename.args[FIRST_ARGUMENT]}),\n             HTTP_STATUS_CODE_NOT_FOUND,\n         )\n \n     image_path = os.environ[IMAGES_PATH] + \"/\" + filename + IMAGE_FORMAT\n-    os.remove(image_path)\n \n-    return jsonify({MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n+    thread_pool.submit(os.remove, image_path)\n+\n+    return jsonify(\n+        {MESSAGE_RESULT: MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCESS\n \n \n if __name__ == \"__main__\":\n-    app.run(host=os.environ[TSNE_HOST_IP], port=int(os.environ[TSNE_HOST_PORT]))\n+    app.run(host=os.environ[TSNE_HOST_IP],\n+            port=int(os.environ[TSNE_HOST_PORT]))\ndiff --git a/microservices/tsne_image/tsne.py b/microservices/tsne_image/tsne.py\n--- a/microservices/tsne_image/tsne.py\n+++ b/microservices/tsne_image/tsne.py\n@@ -23,23 +23,23 @@ class TsneGenerator:\n     def __init__(self, database_url_input):\n         self.spark_session = (\n             SparkSession.builder.appName(\"tsne\")\n-            .config(\"spark.mongodb.input.uri\", database_url_input)\n-            .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n-            .config(\"spark.driver.host\", os.environ[TSNE_HOST_NAME])\n-            .config(\n+                .config(\"spark.mongodb.input.uri\", database_url_input)\n+                .config(\"spark.driver.port\", os.environ[SPARK_DRIVER_PORT])\n+                .config(\"spark.driver.host\", os.environ[TSNE_HOST_NAME])\n+                .config(\n                 \"spark.jars.packages\",\n                 \"org.mongodb.spark:mongo-spark\" + \"-connector_2.11:2.4.2\",\n             )\n-            .master(\n+                .master(\n                 \"spark://\"\n                 + os.environ[SPARKMASTER_HOST]\n                 + \":\"\n                 + str(os.environ[SPARKMASTER_PORT])\n             )\n-            .getOrCreate()\n+                .getOrCreate()\n         )\n \n-    def create_image(self, filename, label_name, tsne_filename):\n+    def create_image(self, label_name, tsne_filename):\n         dataframe = self.file_processor()\n         dataframe = dataframe.dropna()\n         string_fields = self.fields_from_dataframe(dataframe, is_string=True)\n@@ -55,11 +55,13 @@ def create_image(self, filename, label_name, tsne_filename):\n         treated_array = np.array(encoded_dataframe)\n         embedded_array = TSNE().fit_transform(treated_array)\n         embedded_array = pandas.DataFrame(embedded_array)\n-        image_path = os.environ[IMAGES_PATH] + \"/\" + tsne_filename + IMAGE_FORMAT\n+        image_path = os.environ[\n+                         IMAGES_PATH] + \"/\" + tsne_filename + IMAGE_FORMAT\n \n         if label_name is not None:\n             embedded_array[label_name] = encoded_dataframe[label_name]\n-            sns_plot = sns.scatterplot(x=0, y=1, data=embedded_array, hue=label_name)\n+            sns_plot = sns.scatterplot(x=0, y=1, data=embedded_array,\n+                                       hue=label_name)\n             sns_plot.get_figure().savefig(image_path)\n         else:\n             sns_plot = sns.scatterplot(\n@@ -69,6 +71,8 @@ def create_image(self, filename, 
label_name, tsne_filename):\n             )\n             sns_plot.get_figure().savefig(image_path)\n \n+        self.spark_session.stop()\n+\n     def file_processor(self):\n         file = self.spark_session.read.format(self.MONGO_SPARK_SOURCE).load()\n \n@@ -84,6 +88,7 @@ def file_processor(self):\n             \"time_created\",\n             \"url\",\n             \"parent_filename\",\n+            \"type\"\n         ]\n         processed_file = file_without_metadata.drop(*metadata_fields)\n \n@@ -107,8 +112,9 @@ def fields_from_dataframe(dataframe, is_string):\n \n \n class MongoOperations:\n-    def __init__(self, database_url, database_port, database_name):\n-        self.mongo_client = MongoClient(database_url, int(database_port))\n+    def __init__(self, database_url, replica_set, database_port, database_name):\n+        self.mongo_client = MongoClient(\n+            database_url + '/?replicaSet=' + replica_set, int(database_port))\n         self.database = self.mongo_client[database_name]\n \n     def find_one(self, filename, query):\n@@ -118,6 +124,21 @@ def find_one(self, filename, query):\n     def get_filenames(self):\n         return self.database.list_collection_names()\n \n+    @staticmethod\n+    def collection_database_url(\n+            database_url, database_name, database_filename, database_replica_set\n+    ):\n+        return (\n+                database_url\n+                + \"/\"\n+                + database_name\n+                + \".\"\n+                + database_filename\n+                + \"?replicaSet=\"\n+                + database_replica_set\n+                + \"&authSource=admin\"\n+        )\n+\n \n class TsneRequestValidator:\n     MESSAGE_INVALID_FILENAME = \"invalid_filename\"\n@@ -134,18 +155,20 @@ def parent_filename_validator(self, filename):\n         if filename not in filenames:\n             raise Exception(self.MESSAGE_INVALID_FILENAME)\n \n-    def tsne_filename_existence_validator(self, tsne_filename):\n+    @staticmethod\n+    def tsne_filename_existence_validator(tsne_filename):\n         images = os.listdir(os.environ[IMAGES_PATH])\n         image_name = tsne_filename + IMAGE_FORMAT\n         if image_name in images:\n-            raise Exception(self.MESSAGE_DUPLICATE_FILE)\n+            raise Exception(TsneRequestValidator.MESSAGE_DUPLICATE_FILE)\n \n-    def no_tsne_filename_existence_validator(self, tsne_filename):\n+    @staticmethod\n+    def tsne_filename_inexistence_validator(tsne_filename):\n         images = os.listdir(os.environ[IMAGES_PATH])\n         image_name = tsne_filename + IMAGE_FORMAT\n \n         if image_name not in images:\n-            raise Exception(self.MESSAGE_NOT_FOUND)\n+            raise Exception(TsneRequestValidator.MESSAGE_NOT_FOUND)\n \n     def filename_label_validator(self, filename, label):\n         if label is None:\n@@ -153,7 +176,8 @@ def filename_label_validator(self, filename, label):\n \n         filename_metadata_query = {\"filename\": filename}\n \n-        filename_metadata = self.database.find_one(filename, filename_metadata_query)\n+        filename_metadata = self.database.find_one(filename,\n+                                                   filename_metadata_query)\n \n         if label not in filename_metadata[\"fields\"]:\n             raise Exception(self.MESSAGE_INVALID_LABEL)\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2020-11-04T14:28:51Z"}
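The recurring change in the patch above is architectural: each POST endpoint stops running its Spark job inline and instead submits it to a module-level ThreadPoolExecutor, replying at once with HTTP 201 and the URI where the output can later be read. A minimal, self-contained sketch of that request flow follows, assuming only Flask is installed; heavy_job is a hypothetical stand-in for the real Spark work (SparkManager.projection or TsneGenerator.create_image), and its body is illustrative, while the route, constant names, and returned URI shape mirror the patch.

from concurrent.futures import ThreadPoolExecutor
from flask import Flask, jsonify, request
import time

HTTP_STATUS_CODE_SUCESS_CREATED = 201  # spelling kept consistent with the patch
MESSAGE_RESULT = "result"
MICROSERVICE_URI_GET = "/api/learningOrchestra/v1/transform/projection/"

app = Flask(__name__)
thread_pool = ThreadPoolExecutor()


def heavy_job(output_filename):
    # Placeholder for the long-running Spark job; it executes on a worker
    # thread so the HTTP response below is never blocked by it.
    time.sleep(5)


@app.route("/projections", methods=["POST"])
def create_projection():
    output_filename = request.json["output_filename"]
    # Request validation would happen here, returning 4xx on failure,
    # as the patch does with its *RequestValidator classes.
    thread_pool.submit(heavy_job, output_filename)
    # Respond immediately with the URI where the finished result can be read.
    return (
        jsonify({MESSAGE_RESULT: MICROSERVICE_URI_GET + output_filename}),
        HTTP_STATUS_CODE_SUCESS_CREATED,
    )

Because the 201 response is produced before the job finishes, a client is expected to poll the returned URI until the output collection is available; the patch appends read parameters such as ?query={}&limit=20&skip=0 for exactly that purpose.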